Dataset columns:
    code                     string   (length 81 to 54k)
    code_codestyle           int64    (0 to 721)
    style_context            string   (length 91 to 41.9k)
    style_context_codestyle  int64    (0 to 699)
    label                    int64    (0 to 1)
def _print_dist(dist, v):
    """Print the shortest-path matrix produced by the Floyd-Warshall algorithm."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths in O(v^3) time and O(v^2) space."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
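For readers who want to exercise the routine without the interactive prompts, here is a minimal non-interactive driver; the matrix layout (0.0 on the diagonal, float("inf") for missing edges) and the expected distances come straight from the example run in the comments above.

# Non-interactive sketch reusing floyd_warshall from the script above.
INF = float("inf")

# 3 vertices, edges 1 -> 2 (weight 2) and 2 -> 1 (weight 1), as in the example run.
graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]

dist, _ = floyd_warshall(graph, 3)
assert dist[1][2] == 2.0 and dist[2][1] == 1.0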
721
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
79
0
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
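Conceptually, pad_across_processes brings tensors whose size differs across processes to a common size along dim 0, padding with zeros at the end by default or at the front when pad_first=True. The single-process sketch below illustrates only that padding rule; it is not the accelerate implementation.

import torch

def pad_to(tensor: torch.Tensor, length: int, pad_first: bool = False) -> torch.Tensor:
    # Zero-pad `tensor` along dim 0 up to `length`, at the front or the back.
    pad = torch.zeros((length - tensor.shape[0], *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.ones(2, 3)
assert pad_to(t, 4).shape == (4, 3)
assert torch.all(pad_to(t, 4, pad_first=True)[:2] == 0)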
700
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it (or pass -1) for a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
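A short sketch of how the two classes are meant to be driven, using the method names as reconstructed above:

dg = DirectedGraph()
dg.add_pair(0, 1)
dg.add_pair(1, 2)
dg.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0

print(dg.all_nodes())                     # [0, 1, 2]
print(dg.dfs(0, 2))                       # [0, 1, 2], depth-first path from 0 to 2
print(dg.bfs(0))                          # [0, 1, 2], breadth-first order from 0
print(dg.in_degree(0), dg.out_degree(0))  # 1 1
print(dg.has_cycle())                     # True

ug = Graph()
ug.add_pair(0, 1)  # stored in both directions
print(ug.degree(0))  # 1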
79
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
701
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
79
0
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
702
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
79
0
def all_unique_chars(input_str: str) -> bool:
    """
    Check whether every character in input_str occurs at most once,
    using a bitmap indexed by the characters' Unicode code points.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
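The bitmap is an ordinary Python int, so arbitrarily high code points work at the cost of a large shift. Two quick checks:

assert all_unique_chars("abcde") is True
assert all_unique_chars("hello") is False  # 'l' repeats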
703
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid'
                f' set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
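A usage sketch, assuming the datasets package is installed (Translation is exposed under datasets.features):

from datasets import Dataset, Features, Value
from datasets.features import Translation

features = Features({"id": Value("string"), "translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"id": ["0"], "translation": [{"en": "the cat", "fr": "le chat"}]},
    features=features,
)
print(ds[0]["translation"])  # {'en': 'the cat', 'fr': 'le chat'}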
79
0
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix: table = lower @ upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
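A quick hand-checkable example: for this 2x2 matrix the factors can be verified by multiplication.

import numpy as np

a = np.array([[4.0, 3.0], [6.0, 3.0]])
lower, upper = lower_upper_decomposition(a)
# lower == [[1.0, 0.0], [1.5, 1.0]], upper == [[4.0, 3.0], [0.0, -1.5]]
assert np.allclose(lower @ upper, a)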
704
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor

logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
79
0
def solution(n: int = 100) -> int:
    """
    Project Euler 6: return the difference between the square of the sum and
    the sum of the squares of the first n natural numbers.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
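The closed forms used above are the standard identities sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; a brute-force cross-check for n = 10:

n = 10
brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
assert brute == solution(n) == 2640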
705
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging

logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
79
0
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin

TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
706
from __future__ import annotations

import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
79
0
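A note on the flip augmentation in the row above: for YOLO-style normalized annotations, a horizontal flip maps the x-center to 1 - x and a vertical flip maps the y-center to 1 - y, while widths and heights are unchanged. A minimal, self-contained sketch of that update (the function name and test values are illustrative, not part of the dataset row):

def flip_annotation(box, flip_type=1):
    # box is [label, x_center, y_center, width, height] with coordinates in [0, 1]
    label, x, y, w, h = box
    if flip_type == 1:    # horizontal flip mirrors the x-center
        x = 1 - x
    elif flip_type == 0:  # vertical flip mirrors the y-center
        y = 1 - y
    return [label, x, y, w, h]

assert flip_annotation([0, 0.25, 0.5, 0.1, 0.2], flip_type=1) == [0, 0.75, 0.5, 0.1, 0.2]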
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available __UpperCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
707
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias super().__init__(**lowerCamelCase_ )
79
0
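The ViViT config above sets image_size=224, num_frames=32 and tubelet_size=[2, 16, 16]. Assuming the stock ViViT tubelet embedding, each token covers 2 frames and a 16x16 spatial patch, so these defaults fix the token count. A rough sketch of that arithmetic (the helper name is made up):

def num_tubelet_tokens(image_size=224, num_frames=32, tubelet_size=(2, 16, 16)):
    t, h, w = tubelet_size
    # counts along time, height and width are independent, so they multiply
    return (num_frames // t) * (image_size // h) * (image_size // w)

print(num_tubelet_tokens())  # (32 // 2) * (224 // 16) * (224 // 16) = 3136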
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" assert ( isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and number_of_steps > 0 ), f'''number_of_steps needs to be a positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = 1, 1 for _ in range(number_of_steps - 1 ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
708
'''simple docstring''' import math class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE : Any = n SCREAMING_SNAKE_CASE : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = w def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": __UpperCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
79
0
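The stair-climbing row above is the classic Fibonacci-style recurrence: the last move is either a 1-step or a 2-step, so ways(n) = ways(n - 1) + ways(n - 2). A sketch of the same loop with descriptive names substituted for the obfuscated ones:

def climb_stairs(number_of_steps: int) -> int:
    assert isinstance(number_of_steps, int) and number_of_steps > 0
    current, previous = 1, 1  # ways(1), ways(0)
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current

assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]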
'''simple docstring''' import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __UpperCAmelCase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def __A ( lowerCamelCase_=None ): """simple docstring""" if subparsers is not None: SCREAMING_SNAKE_CASE : int = subparsers.add_parser("""tpu-config""" , description=_description ) else: SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments SCREAMING_SNAKE_CASE : Dict = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=_lowercase , default=_lowercase , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=_lowercase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=_lowercase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) SCREAMING_SNAKE_CASE : Any = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=_lowercase , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(_lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: SCREAMING_SNAKE_CASE : int = defaults.command_file if not args.command and defaults.commands is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = defaults.commands if not args.tpu_name: SCREAMING_SNAKE_CASE : int = defaults.tpu_name if not args.tpu_zone: SCREAMING_SNAKE_CASE : Union[str, Any] = defaults.tpu_zone if args.accelerate_version == "dev": SCREAMING_SNAKE_CASE : int = "git+https://github.com/huggingface/accelerate.git" elif args.accelerate_version == "latest": SCREAMING_SNAKE_CASE : Optional[Any] = "accelerate -U" elif isinstance(parse(args.accelerate_version ) , _lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = f'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: SCREAMING_SNAKE_CASE : int = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , _lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate SCREAMING_SNAKE_CASE : Tuple = ["cd /usr/share"] if args.install_accelerate: new_cmd += [f'''pip install {args.accelerate_version}'''] new_cmd += args.command SCREAMING_SNAKE_CASE : List[Any] = "; ".join(_lowercase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess SCREAMING_SNAKE_CASE : str = ["gcloud"] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'''Running {" ".join(_lowercase )}''' ) return subprocess.run(_lowercase ) print("""Successfully setup pod.""" ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = tpu_command_parser() SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() tpu_command_launcher(_lowercase )
709
'''simple docstring''' import math def __A ( lowerCamelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __A ( lowerCamelCase_ = 1_00_01 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE : list[int] = [] SCREAMING_SNAKE_CASE : Dict = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
79
0
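The primality test in the row above relies on the fact that every prime greater than 3 has the form 6k - 1 or 6k + 1, so trial division only needs to probe i and i + 2 for i = 5, 11, 17, ... up to sqrt(n). A deobfuscated sketch of the same check:

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:  # probes 6k - 1 and 6k + 1
            return False
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]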
import numpy as np import datasets __UpperCAmelCase = ''' Compute the Mahalanobis Distance Mahalanobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' __UpperCAmelCase = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' __UpperCAmelCase = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalanobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ), } ) , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = np.array(__lowerCAmelCase ) SCREAMING_SNAKE_CASE : int = np.array(__lowerCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("""Expected `X` to be a 2D vector""" ) if len(reference_distribution.shape ) != 2: raise ValueError("""Expected `reference_distribution` to be a 2D vector""" ) if reference_distribution.shape[0] < 2: raise ValueError( """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" ) # Get mahalanobis distance for each prediction SCREAMING_SNAKE_CASE : str = X - np.mean(__lowerCAmelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.cov(reference_distribution.T ) try: SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCAmelCase ) except np.linalg.LinAlgError: SCREAMING_SNAKE_CASE : Any = np.linalg.pinv(__lowerCAmelCase ) SCREAMING_SNAKE_CASE : List[Any] = np.dot(__lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(__lowerCAmelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
710
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
0
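The metric in the row above computes d_i^2 = (x_i - mu) Sigma^+ (x_i - mu)^T against the reference distribution, falling back to the pseudo-inverse when the covariance is singular. A standalone sketch reproducing the docstring example (this uses the per-feature mean, the textbook form; the row itself subtracts np.mean over the flattened array, which coincides on this input):

import numpy as np

reference = np.array([[0.0, 1.0], [1.0, 0.0]])
X = np.array([[0.0, 1.0]])

delta = X - reference.mean(axis=0)     # deviation from the distribution mean
sigma = np.cov(reference.T)            # covariance of the reference points
try:
    sigma_inv = np.linalg.inv(sigma)
except np.linalg.LinAlgError:
    sigma_inv = np.linalg.pinv(sigma)  # singular covariance: use the pseudo-inverse
print(np.dot(np.dot(delta, sigma_inv), delta.T).diagonal())  # [0.5], as in the docstring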
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 0 def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : str = Path(UpperCAmelCase_ ) / """preprocessor_config.json""" SCREAMING_SNAKE_CASE : int = Path(UpperCAmelCase_ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : List[str] = Path(UpperCAmelCase_ ) / """preprocessor_config.json""" SCREAMING_SNAKE_CASE : str = Path(UpperCAmelCase_ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) ) SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json""" SCREAMING_SNAKE_CASE : Any = Path(UpperCAmelCase_ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ).to_dict() config_dict.pop("""image_processor_type""" ) SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(**UpperCAmelCase_ ) # save in new folder model_config.save_pretrained(UpperCAmelCase_ ) config.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE : List[str] = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in 
dict_as_saved ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained("""clip-base""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCAmelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' try: AutoConfig.register("""custom""" , UpperCAmelCase_ ) AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase_ ): AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json""" SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCAmelCase_ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) ) SCREAMING_SNAKE_CASE : Dict = CustomImageProcessor.from_pretrained(UpperCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : int ): '''simple docstring''' class UpperCamelCase__ ( __UpperCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = True try: AutoConfig.register("""custom""" , UpperCAmelCase_ ) AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(UpperCAmelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
711
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """Hello world! cécé herlolip""" __UpperCAmelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage ) SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ----------------------------------- # Make sure the outputs are identical # ----------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None # The original model does not apply the generator layer immediately but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference between model outputs: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference between generator outputs: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all model outputs are equal up to 1e-3""" ) else: raise ValueError("""the outputs are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) __UpperCAmelCase = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
79
0
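The conversion script above ports weights between implementations and then verifies equivalence by comparing forward passes. The same pattern in isolation, with toy linear modules standing in for the two summarizers (illustrative only):

import torch

torch.manual_seed(0)
original = torch.nn.Linear(4, 4)
converted = torch.nn.Linear(4, 4)
converted.load_state_dict(original.state_dict())  # "port" the weights

x = torch.randn(2, 4)
with torch.no_grad():
    out_a, out_b = original(x), converted(x)

max_abs_diff = torch.max(torch.abs(out_a - out_b)).item()
print(f"Maximum absolute difference between outputs: {max_abs_diff:.2e}")
assert torch.allclose(out_a, out_b, atol=1e-3), "models diverged after conversion"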
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } __UpperCAmelCase = { 'openbmb/cpm-ant-10b': 1024, } def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = collections.OrderedDict() with open(_lowercase , """r""" , encoding="""utf-8""" ) as reader: SCREAMING_SNAKE_CASE : List[str] = reader.readlines() for index, token in enumerate(_lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = token.rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Optional[int] = index return vocab class UpperCamelCase__ ( UpperCamelCase_ ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any]="<unk>" , lowerCamelCase_ : List[Any]=2_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = vocab SCREAMING_SNAKE_CASE : str = unk_token SCREAMING_SNAKE_CASE : Any = max_input_chars_per_word def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = list(UpperCamelCase__ ) if len(UpperCamelCase__ ) > self.max_input_chars_per_word: return [self.unk_token] SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : Optional[Any] = [] while start < len(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = None while start < end: SCREAMING_SNAKE_CASE : int = ''''''.join(chars[start:end] ) if substr in self.vocab: SCREAMING_SNAKE_CASE : List[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = end return sub_tokens class UpperCamelCase__ ( UpperCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = False def __init__( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]="<d>" , lowerCamelCase_ : Union[str, Any]="</d>" , lowerCamelCase_ : Dict="<s>" , lowerCamelCase_ : int="</s>" , lowerCamelCase_ : Optional[Any]="<pad>" , lowerCamelCase_ : Union[str, Any]="<unk>" , lowerCamelCase_ : int="</n>" , lowerCamelCase_ : List[str]="</_>" , lowerCamelCase_ : Tuple="left" , **lowerCamelCase_ : List[str] , ): '''simple docstring''' requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=UpperCamelCase__ , eod_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , line_token=UpperCamelCase__ , space_token=UpperCamelCase__ , padding_side=UpperCamelCase__ , **UpperCamelCase__ , ) SCREAMING_SNAKE_CASE : List[str] = bod_token SCREAMING_SNAKE_CASE : Optional[int] = eod_token SCREAMING_SNAKE_CASE : Dict = load_vocab(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = self.encoder[space_token] SCREAMING_SNAKE_CASE : Any = self.encoder[line_token] del self.encoder[space_token] del 
self.encoder[line_token] SCREAMING_SNAKE_CASE : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_ : x[1] ) ) SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.encoder[self.bod_token] @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.encoder[self.eod_token] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.encoder["\n"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return len(self.encoder ) def lowerCamelCase_ ( self : int ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] for x in jieba.cut(UpperCamelCase__ , cut_all=UpperCamelCase__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase__ ) ) return output_tokens def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [i for i in token_ids if i >= 0] SCREAMING_SNAKE_CASE : int = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Any ): '''simple docstring''' return token in self.encoder def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' return "".join(UpperCamelCase__ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : str ): '''simple docstring''' return self.decoder.get(UpperCamelCase__ , self.unk_token ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if os.path.isdir(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: SCREAMING_SNAKE_CASE : int = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory SCREAMING_SNAKE_CASE : List[Any] = 0 if " " in self.encoder: SCREAMING_SNAKE_CASE : Optional[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: SCREAMING_SNAKE_CASE : str = self.encoder['''\n'''] del self.encoder["\n"] SCREAMING_SNAKE_CASE : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_ : x[1] ) ) with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) SCREAMING_SNAKE_CASE : Optional[Any] = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : List[int] = None ): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a 
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) return [1] + ([0] * len(UpperCamelCase__ ))
712
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
79
0
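The WordpieceTokenizer in the row above does greedy longest-match segmentation: it repeatedly takes the longest vocabulary prefix of the remaining characters and emits the unk token when nothing matches. A toy-vocabulary sketch of that inner loop (names and vocabulary are illustrative):

def wordpiece(word: str, vocab: set, unk_token: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:               # shrink the window until a vocab hit
            if word[start:end] in vocab:
                cur = word[start:end]
                break
            end -= 1
        if cur is None:                  # no prefix matched: emit unk, skip one char
            tokens.append(unk_token)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens

assert wordpiece("unhappy", {"un", "hap", "happy"}) == ["un", "happy"]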
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : List[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , ) super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
713
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : List[str] = number while duplicate > 0: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = divmod(lowerCamelCase_ , 10 ) fact_sum += factorial(lowerCamelCase_ ) return fact_sum == number if __name__ == "__main__": print("""Program to check whether a number is a Krisnamurthy Number or not.""") __UpperCAmelCase = int(input("""Enter number: """).strip()) print( f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.''' )
79
0
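A Krishnamurthy (factorion) number, as checked in the row above, equals the sum of the factorials of its digits, e.g. 145 = 1! + 4! + 5!. A compact sketch of the same test:

from math import factorial

def is_krishnamurthy(number: int) -> bool:
    return number == sum(factorial(int(digit)) for digit in str(number))

assert [n for n in range(1, 1000) if is_krishnamurthy(n)] == [1, 2, 145]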
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def __A ( lowerCamelCase_ , lowerCamelCase_ = 16 , lowerCamelCase_ = "bert-base-cased" ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCamelCase_ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE : List[str] = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["""lr"""] SCREAMING_SNAKE_CASE : Dict = int(config["""num_epochs"""] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["""seed"""] ) SCREAMING_SNAKE_CASE : List[str] = int(config["""batch_size"""] ) SCREAMING_SNAKE_CASE : List[Any] = args.model_name_or_path set_seed(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) SCREAMING_SNAKE_CASE : List[str] = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ ) if accelerator.state.deepspeed_plugin is not None: SCREAMING_SNAKE_CASE : Any = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : str = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): SCREAMING_SNAKE_CASE : List[Any] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , ) else: SCREAMING_SNAKE_CASE : Dict = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE : Dict = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # We need to keep track of how many total steps we have iterated over SCREAMING_SNAKE_CASE : str = 0 # We also need to keep track of the stating epoch so files are named properly SCREAMING_SNAKE_CASE : Tuple = 0 # Now we train the model SCREAMING_SNAKE_CASE : str = evaluate.load("""glue""" , """mrpc""" ) SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Tuple = {} for epoch in range(lowerCamelCase_ , lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = outputs.loss SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() SCREAMING_SNAKE_CASE : List[Any] = 0 for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times SCREAMING_SNAKE_CASE : str = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCamelCase_ ) - 1: SCREAMING_SNAKE_CASE : int = predictions[: len(eval_dataloader.dataset ) - samples_seen] SCREAMING_SNAKE_CASE : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: SCREAMING_SNAKE_CASE : Dict = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase_ , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""Optional lower bound for the performance metric. 
If set, the training will throw an error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase_ , default=3 , help="""Number of train epochs.""" , ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE : Any = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
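# A minimal sketch of the optimizer-selection pattern used in the training
# script above (the function and argument names here are illustrative, not
# part of the script): fall back to Accelerate's DummyOptim when the DeepSpeed
# config already owns the optimizer, so `accelerator.prepare` can substitute
# the DeepSpeed-managed one.
from accelerate.utils import DummyOptim
from torch.optim import AdamW

def pick_optimizer(accelerator, model, lr=2e-5):
    ds_plugin = accelerator.state.deepspeed_plugin
    if ds_plugin is None or "optimizer" not in ds_plugin.deepspeed_config:
        return AdamW(model.parameters(), lr=lr)  # we own the optimizer
    return DummyOptim(model.parameters(), lr=lr)  # DeepSpeed owns it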
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) elif isinstance(lowerCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ ) return self.recursive_tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ ) for column_name in batch: SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] ) return batch
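# Hedged illustration of what the torch formatter above produces (toy data,
# not the datasets API): numpy integers are upcast to torch.int64 and float
# arrays become torch tensors, roughly mirroring the dtype mapping in the
# _tensorize method.
import numpy as np
import torch

row = {"label": np.int64(1), "pixels": np.zeros((2, 2), dtype=np.float32)}
tensors = {k: torch.tensor(v) for k, v in row.items()}
assert tensors["label"].dtype == torch.int64
assert tensors["pixels"].shape == (2, 2)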
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
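# The guarded-import idiom above, in miniature (names hypothetical): raise a
# dedicated error inside the try block, then bind either dummy stand-ins or
# the real objects in the except/else branches.
class _BackendMissing(ImportError):
    pass

def resolve_pipeline(backend_ok: bool) -> str:
    try:
        if not backend_ok:
            raise _BackendMissing()
    except _BackendMissing:
        return "dummy pipeline"  # stand-in that raises a helpful error on use
    else:
        return "real pipeline"

assert resolve_pipeline(False) == "dummy pipeline"
assert resolve_pipeline(True) == "real pipeline"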
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEqual(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
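# Illustrative only: the shape assertions above encode a 4-D layout of
# (batch, num_audio_channels, time_frames, feature_size); a toy stand-in
# array makes the convention concrete (sizes hypothetical).
import numpy as np

encoded = np.zeros((2, 1, 192, 128))
assert encoded.ndim == 4
assert encoded.shape[-1] == 128  # feature_size
assert encoded.shape[-3] == 1    # num_audio_channels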
def __A(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
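# Quick sanity check for the XOR helper above, with values worked out by hand:
# zero-filled, 25 = 0b011001 and 32 = 0b100000, so their XOR is 0b111001.
assert __A(25, 32) == "0b111001"
assert __A(37, 50) == "0b010111"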
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
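# The _LazyModule idea above, reduced to its core (class and example names are
# hypothetical): attributes resolve to a real import only on first access.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._structure = import_structure

    def __getattr__(self, name):
        for module, attrs in self._structure.items():
            if name in attrs:
                return getattr(importlib.import_module(module), name)
        raise AttributeError(name)

ns = LazyNamespace({"math": ["sqrt"]})
assert ns.sqrt(4) == 2.0  # "math" is imported only at this point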
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __A ( lowerCamelCase_ , lowerCamelCase_ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] for part_id in partition_order: SCREAMING_SNAKE_CASE : List[Any] = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(lowerCamelCase_ ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : Tuple = spark.range(1_00 ).repartition(1 ) SCREAMING_SNAKE_CASE : Optional[Any] = Spark(lowerCamelCase_ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : str = spark.range(10 ).repartition(2 ) SCREAMING_SNAKE_CASE : int = [1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = _generate_iterable_examples(lowerCamelCase_ , lowerCamelCase_ ) # Reverse the partitions. SCREAMING_SNAKE_CASE : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase_ , lowerCamelCase_ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : int = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : str = spark.range(10 ).repartition(1 ) SCREAMING_SNAKE_CASE : Tuple = SparkExamplesIterable(lowerCamelCase_ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(lowerCamelCase_ ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : Optional[int] = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: SCREAMING_SNAKE_CASE : List[str] = lambda lowerCamelCase_ : x.reverse() SCREAMING_SNAKE_CASE : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase_ , [2, 1, 0] ) SCREAMING_SNAKE_CASE : Optional[int] = SparkExamplesIterable(lowerCamelCase_ ).shuffle_data_sources(lowerCamelCase_ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : List[Any] = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 SCREAMING_SNAKE_CASE : Optional[int] = SparkExamplesIterable(lowerCamelCase_ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase_ , [0, 2] ) for i, (row_id, row_dict) in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 SCREAMING_SNAKE_CASE : int = SparkExamplesIterable(lowerCamelCase_ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase_ , [1, 3] ) for i, (row_id, row_dict) in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A ( ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(1_00 ).repartition(1 ) SCREAMING_SNAKE_CASE : int = Spark(lowerCamelCase_ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_00
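# Hedged sketch of the worker-sharding rule the two shard tests above exercise
# (not the datasets-internal code): worker i of n takes every n-th partition
# starting at i, which yields [0, 2] and [1, 3] for 4 partitions and 2 workers.
def shard_partitions(num_partitions, worker_id, num_workers):
    return list(range(worker_id, num_partitions, num_workers))

assert shard_partitions(4, worker_id=0, num_workers=2) == [0, 2]
assert shard_partitions(4, worker_id=1, num_workers=2) == [1, 3]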
'''simple docstring'''
__UpperCAmelCase = [
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
'''simple docstring'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # total sum of single-character counts.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha pair (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """simple docstring"""
    single_char_strings: Counter = Counter()  # type: ignore
    two_char_strings: Counter = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
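# Tiny worked example of the first-order entropy printed above: in "abab",
# p('a') = p('b') = 0.5, so H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit
# per character (variable names here are illustrative).
import math
from collections import Counter

sample_text = "abab"
counts = Counter(sample_text)
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert round(entropy, 6) == 1.0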
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : str ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[Any] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Dict = hint.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Any = self.movq.config.latent_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint} SCREAMING_SNAKE_CASE : Dict = self.unet( sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.split(latents.shape[1] , 
dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0] # post-processing SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Optional[int] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
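# Runnable restatement of the height/width helper defined at the top of the
# pipeline above (the short name here is ours): a pixel size maps to
# ceil(size / scale_factor**2) * scale_factor, i.e. the latent size rounded up.
def downscale_hw(height, width, scale_factor=8):
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

assert downscale_hw(768, 768) == (96, 96)   # 768 is already a multiple of 64
assert downscale_hw(769, 768) == (104, 96)  # 769 rounds up to the next bucket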
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=lowercase__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=lowercase__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=lowercase__ ) return parser.parse_args() def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE : Optional[int] = script_fpath.stem SCREAMING_SNAKE_CASE : Tuple = importlib.import_module(lowercase__ ) # Patch sys.argv SCREAMING_SNAKE_CASE : int = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
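# Hypothetical invocation of the launcher above (script name illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports run_glue.py as a module, rewrites sys.argv to append
# --tpu_num_cores 8 after the forwarded arguments, then hands the module's
# _mp_fn to xmp.spawn, one process per TPU core.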
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __UpperCAmelCase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Tuple = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) return (out_vocab_file,)
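# Sketch of the special-token layouts the tokenizer above builds (the ids are
# illustrative placeholders): [CLS] A [SEP] for one sequence, and
# [CLS] A [SEP] B [SEP] for a pair.
CLS, SEP = 101, 102  # hypothetical ids

def build_inputs(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    return out if ids_b is None else out + ids_b + [SEP]

assert build_inputs([7, 8]) == [101, 7, 8, 102]
assert build_inputs([7], [9]) == [101, 7, 102, 9, 102]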
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class UpperCamelCase__ ( __lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__( self : Any , lowerCamelCase_ : UNetaDModel , lowerCamelCase_ : ScoreSdeVeScheduler ): '''simple docstring''' super().__init__() self.register_modules(unet=__a , scheduler=__a ) @torch.no_grad() def __call__( self : List[str] , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 20_00 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , **lowerCamelCase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.unet.config.sample_size SCREAMING_SNAKE_CASE : List[Any] = (batch_size, 3, img_size, img_size) SCREAMING_SNAKE_CASE : str = self.unet SCREAMING_SNAKE_CASE : Any = randn_tensor(__a , generator=__a ) * self.scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(self.device ) self.scheduler.set_timesteps(__a ) self.scheduler.set_sigmas(__a ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): SCREAMING_SNAKE_CASE : int = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(__a , __a ).sample SCREAMING_SNAKE_CASE : Any = self.scheduler.step_correct(__a , __a , generator=__a ).prev_sample # prediction step SCREAMING_SNAKE_CASE : Any = model(__a , __a ).sample SCREAMING_SNAKE_CASE : int = self.scheduler.step_pred(__a , __a , __a , generator=__a ) SCREAMING_SNAKE_CASE : List[str] = output.prev_sample, output.prev_sample_mean SCREAMING_SNAKE_CASE : Tuple = sample_mean.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : List[Any] = self.numpy_to_pil(__a ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__a )
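# Schematic of the predictor-corrector loop above, with toy callables instead
# of diffusers objects: every timestep runs `correct_steps` Langevin-style
# corrector updates before a single predictor step.
def pc_sampler(sample, timesteps, correct_steps, corrector, predictor):
    for t in timesteps:
        for _ in range(correct_steps):
            sample = corrector(sample, t)
        sample = predictor(sample, t)
    return sample

out = pc_sampler(0.0, [1.0, 0.5], 2, lambda s, t: s + t, lambda s, t: s * 0.5)
assert out == 1.0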
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""swish""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = get_activation("""silu""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""mish""" ) self.assertIsInstance(lowerCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_activation("""gelu""" ) self.assertIsInstance(lowerCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
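# Hedged toy version of the string-to-module lookup the tests above rely on
# (the real get_activation lives in diffusers.models.activations; this
# registry is illustrative only, including the swish/silu aliasing).
import torch.nn as nn

ACT_SKETCH = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def get_activation_sketch(name):
    return ACT_SKETCH[name]()

assert isinstance(get_activation_sketch("swish"), nn.SiLU)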
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class UpperCamelCase__ ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE__ = None class UpperCamelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE__ = PandasConfig def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) SCREAMING_SNAKE_CASE : str = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__ , (str, list, tuple) ): SCREAMING_SNAKE_CASE : Union[str, Any] = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Dict = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] SCREAMING_SNAKE_CASE : List[Any] = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Tuple = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : Any = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE : List[Any] = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): with open(UpperCAmelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = pa.Table.from_pandas(pd.read_pickle(UpperCAmelCase__ ) ) yield i, self._cast_table(UpperCAmelCase__ )
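# Minimal illustration of the conversion step in _generate_tables above (the
# pickle IO is elided; this DataFrame is a stand-in for what pd.read_pickle
# would return).
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2]})
table = pa.Table.from_pandas(df)
assert table.num_rows == 2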
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''deberta-v2''' def __init__( self : int , lowerCamelCase_ : Optional[Any]=12_81_00 , lowerCamelCase_ : str=15_36 , lowerCamelCase_ : int=24 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : List[Any]=61_44 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : str=0 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Dict=1e-7 , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : Dict="gelu" , **lowerCamelCase_ : Optional[int] , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : List[Any] = relative_attention SCREAMING_SNAKE_CASE : str = max_relative_positions SCREAMING_SNAKE_CASE : int = pad_token_id SCREAMING_SNAKE_CASE : List[str] = position_biased_input # Backwards compatibility if type(lowerCamelCase_ ) == str: SCREAMING_SNAKE_CASE : Dict = [x.strip() for x in pos_att_type.lower().split("""|""" )] SCREAMING_SNAKE_CASE : Any = pos_att_type SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = kwargs.get("""pooler_hidden_size""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pooler_dropout SCREAMING_SNAKE_CASE : Any = pooler_hidden_act class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), 
("""attention_mask""", dynamic_axis)] ) @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return 12 def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional["TensorType"] = None , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : "PreTrainedTokenizerBase" = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = super().generate_dummy_inputs(preprocessor=lowerCamelCase_ , framework=lowerCamelCase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[int] = global_rng SCREAMING_SNAKE_CASE : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : str=4_00 , lowerCamelCase_ : str=20_00 , lowerCamelCase_ : str=10 , lowerCamelCase_ : Optional[int]=1_60 , lowerCamelCase_ : Optional[int]=8 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : List[str]=40_00 , lowerCamelCase_ : str=False , lowerCamelCase_ : Tuple=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = min_seq_length SCREAMING_SNAKE_CASE : Tuple = max_seq_length SCREAMING_SNAKE_CASE : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : Tuple = padding_value SCREAMING_SNAKE_CASE : Tuple = sampling_rate SCREAMING_SNAKE_CASE : Any = return_attention_mask SCREAMING_SNAKE_CASE : Tuple = do_normalize SCREAMING_SNAKE_CASE : List[str] = feature_size SCREAMING_SNAKE_CASE : Any = chunk_length SCREAMING_SNAKE_CASE : Optional[int] = hop_length def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase_ ( self : int , lowerCamelCase_ : int=False , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : str ): return list(itertools.chain(*A__ ) ) if equal_length: SCREAMING_SNAKE_CASE : int = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : int = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Dict = [np.asarray(A__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = WhisperFeatureExtractor if is_speech_available() else None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = WhisperFeatureExtractionTester(self ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Tuple = feat_extract_first.save_pretrained(A__ )[0] check_json_file_has_correct_format(A__ ) SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class.from_pretrained(A__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : str = feat_extract_first.mel_filters SCREAMING_SNAKE_CASE : str = feat_extract_second.mel_filters self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(A__ , """feat_extract.json""" ) feat_extract_first.to_json_file(A__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_json_file(A__ ) SCREAMING_SNAKE_CASE : str = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.mel_filters SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_second.mel_filters self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : Dict = [np.asarray(A__ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE : Any = feature_extractor(A__ , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE : str = feature_extractor(A__ , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : int = feature_extractor(A__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : str = np.asarray(A__ ) SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(A__ , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(A__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test truncation required SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(A__ ) for speech_input in speech_inputs] SCREAMING_SNAKE_CASE : str = [x[: feature_extractor.n_samples] for x in speech_inputs] SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(A__ ) for speech_input in speech_inputs_truncated] SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(A__ , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : Tuple = feature_extractor(A__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' import torch SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.rand(1_00 , 32 ).astype(np.floataa ) SCREAMING_SNAKE_CASE : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) SCREAMING_SNAKE_CASE : Dict = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on SCREAMING_SNAKE_CASE : Tuple = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : List[Any] = WhisperFeatureExtractor() SCREAMING_SNAKE_CASE : Tuple = feature_extractor(A__ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Any = self._load_datasamples(1 )[0] SCREAMING_SNAKE_CASE : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue SCREAMING_SNAKE_CASE : int = 
feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
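The tests above pin down the extractor's contract; below is a short sketch of the corresponding happy path. The one-second random waveform is a stand-in input, while the (1, 80, 3000) log-mel shape is the one the integration test itself asserts.

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s chunks
audio = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio

features = feature_extractor(audio, sampling_rate=16000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): shorter clips are padded out to the 30 s chunk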
700
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
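The graph class above implements its traversals iteratively with an explicit stack/deque; here is a compact, self-contained restatement of that DFS/BFS logic over a plain adjacency dict (a sketch of the same idea, not the class's exact visit order).

from collections import deque

def dfs(graph, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push unvisited neighbours; reversed() keeps a left-to-right visit order
            stack.extend(reversed([v for v in graph.get(node, []) if v not in visited]))
    return visited

def bfs(graph, start):
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for v in graph.get(node, []):
            if v not in visited:
                visited.append(v)
                queue.append(v)
    return visited

g = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(dfs(g, 0))  # [0, 1, 3, 2]
print(bfs(g, 0))  # [0, 1, 2, 3]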
79
0
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = GPTSwaTokenizer SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[Any] = GPTSwaTokenizer(lowerCamelCase_ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """This is a test""" SCREAMING_SNAKE_CASE : List[Any] = """This is a test""" return input_text, output_text def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """<s>""" SCREAMING_SNAKE_CASE : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(lowerCamelCase_ ) , 20_00 ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 20_00 ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = GPTSwaTokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [4_65, 2_87, 2_65, 6_31, 8_42] ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( lowerCamelCase_ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , ) SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) # fmt: off self.assertListEqual( lowerCamelCase_ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = GPTSwaTokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = ["""This is a test""", """I was 
born in 92000, and this is falsé."""] SCREAMING_SNAKE_CASE : Optional[Any] = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertListEqual(tokenizer.encode_fast(lowerCamelCase_ ) , lowerCamelCase_ ) # Test that decode_fast returns the input text for text, token_ids in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(tokenizer.decode_fast(lowerCamelCase_ ) , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [ """<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowerCamelCase_ , )
701
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
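For reference, this is the special-token layout that build_inputs_with_special_tokens above produces (the BART/RoBERTa pair convention); the token ids below are made up for readability.

# Illustration only: 0 = <s> (cls), 2 = </s> (sep); sequence ids are arbitrary.
cls_id, sep_id = 0, 2
seq_a, seq_b = [11, 12], [21, 22]

single = [cls_id] + seq_a + [sep_id]                           # <s> A </s>
pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]  # <s> A </s></s> B </s>
print(single)  # [0, 11, 12, 2]
print(pair)    # [0, 11, 12, 2, 2, 21, 22, 2]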
79
0
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
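A minimal instantiation sketch: GPTNeoXJapaneseConfig is importable from the top-level transformers package, and the printed values are the defaults defined above except the overridden dropout.

from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)  # override one default
print(config.vocab_size)      # 32000
print(config.hidden_dropout)  # 0.1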
702
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
79
0
'''simple docstring'''

from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX,
    USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME,
    ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy,
    PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule,
    add_code_sample_docstrings, add_end_docstrings, add_start_docstrings,
    add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path,
    define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name,
    get_torch_version, has_file, http_user_agent,
    is_apex_available, is_bs4_available, is_coloredlogs_available, is_datasets_available,
    is_detectron2_available, is_faiss_available, is_flax_available, is_ftfy_available,
    is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available,
    is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available,
    is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available,
    is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available,
    is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available,
    is_tensor, is_tensorflow_probability_available, is_tf2onnx_available, is_tf_available,
    is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bf16_available,
    is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available,
    is_torch_tf32_available, is_torch_tpu_available, is_torchaudio_available,
    is_training_run_on_sagemaker, is_vision_available,
    replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method,
)
703
'''simple docstring'''

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """simple docstring"""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """simple docstring"""

    languages: Optional[List[str]] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
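A worked example of the encode_example flattening above: a plain string stays one (language, text) pair, a list fans out to one pair per element, and the result is ordered by sorting the tuples, so ties on the language code fall back to the text.

tvl = TranslationVariableLanguages(languages=["de", "en", "fr"])
encoded = tvl.encode_example(
    {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
)
print(encoded["language"])     # ('de', 'en', 'fr', 'fr')
print(encoded["translation"])  # ('die katze', 'the cat', 'la chatte', 'le chat')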
79
0
'''simple docstring'''


def binary_insertion_sort(collection):
    """Sort a mutable sequence in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion index of `val` within the sorted prefix collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements one slot to the right and drop `val` into place
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
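A quick sanity check of the sort above; the binary search makes each insertion-point lookup O(log i), though element shifting keeps the worst case at O(n^2).

data = [5, 2, 9, 1, 5, 6]
print(binary_insertion_sort(data))  # [1, 2, 5, 5, 6, 9]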
704
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
79
0
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ['''pixel_values'''] def __init__( self : str , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[int, float] = 1 / 2_55 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCamelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCamelCase_ : Tuple , ): '''simple docstring''' super().__init__(**__snake_case ) SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'''shortest_edge''': 2_24} SCREAMING_SNAKE_CASE : str = get_size_dict(__snake_case , default_to_square=__snake_case ) SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__snake_case , param_name="""crop_size""" ) SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Dict = size SCREAMING_SNAKE_CASE : Optional[Any] = resample SCREAMING_SNAKE_CASE : List[str] = do_center_crop SCREAMING_SNAKE_CASE : List[Any] = crop_size SCREAMING_SNAKE_CASE : Dict = do_rescale SCREAMING_SNAKE_CASE : str = rescale_factor SCREAMING_SNAKE_CASE : str = do_normalize SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: SCREAMING_SNAKE_CASE : List[str] = int((2_56 / 2_24) * size["""shortest_edge"""] ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case ) SCREAMING_SNAKE_CASE : List[str] = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. 
Got {size_dict.keys()}''' ) return resize( __snake_case , size=(size_dict["""height"""], size_dict["""width"""]) , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Union[int, float] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Any , ): '''simple docstring''' return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Tuple , ): '''simple docstring''' return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : ImageInput , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : PILImageResampling = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[float] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCamelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCamelCase_ : Optional[TensorType] = None , lowerCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE : Tuple = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size SCREAMING_SNAKE_CASE : int = get_size_dict(__snake_case , default_to_square=__snake_case ) SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__snake_case , param_name="""crop_size""" ) SCREAMING_SNAKE_CASE : List[Any] = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(__snake_case ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : Optional[int] = [self.resize(__snake_case , __snake_case , __snake_case ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : Any = [self.center_crop(__snake_case , __snake_case ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : int = [self.rescale(__snake_case , __snake_case ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(__snake_case , __snake_case , __snake_case ) for image in images] SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] SCREAMING_SNAKE_CASE : List[str] = {'''pixel_values''': images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
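Below is a standalone numpy sketch of the preprocessing order the method above applies (resize, center crop, rescale, normalize); the ImageNet-style mean/std values are the usual defaults and are assumed here for illustration.

import numpy as np

image = np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)  # fake HWC image

# center crop to 224x224 (the resize step is skipped: the input is already large enough)
h, w = image.shape[:2]
top, left = (h - 224) // 2, (w - 224) // 2
image = image[top : top + 224, left : left + 224]

# rescale to [0, 1], then normalize per channel
image = image * (1 / 255)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = (image - mean) / std
print(image.shape)  # (224, 224, 3)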
705
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ )

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
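A small sketch exercising the helper above: it returns the first installed backend's name, or raises with per-backend install hints when none of optuna/ray/sigopt/wandb is available.

try:
    backend_name = default_hp_search_backend()
    print(f"Using {backend_name} for hyperparameter search")
except RuntimeError as err:
    print(err)  # lists the pip command for each missing backend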
79
0
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
706
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": __A() # the entry point above is named __A, so call it by that name print("""DONE ✅""")
79
0
'''simple docstring'''

from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f'''{solution() = }''')
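As a hedged cross-check of the counting logic above: a square lamina with outer side `outer` and a centred hole of side `hole` (same parity, at least one tile thick) uses outer**2 - hole**2 tiles, so a direct enumeration should agree with solution() for small limits. Names below are mine:

# Brute-force cross-check of solution() above for small limits (names are mine).
from collections import defaultdict


def brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, shrink by 2
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


# brute_force(1000) should equal solution(1000) if the bounds above are right.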
707
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias super().__init__(**lowerCamelCase_ )
79
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = "naver-clova-ix/donut-base-finetuned-docvqa" SCREAMING_SNAKE_CASE__ = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) SCREAMING_SNAKE_CASE__ = "document_qa" SCREAMING_SNAKE_CASE__ = AutoProcessor SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel SCREAMING_SNAKE_CASE__ = ["image", "text"] SCREAMING_SNAKE_CASE__ = ["text"] def __init__( self : Optional[int] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' if not is_vision_available(): raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" ) super().__init__(*_snake_case , **_snake_case ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : "Image" , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" SCREAMING_SNAKE_CASE : Any = task_prompt.replace("""{user_input}""" , _snake_case ) SCREAMING_SNAKE_CASE : List[Any] = self.pre_processor.tokenizer( _snake_case , add_special_tokens=_snake_case , return_tensors="""pt""" ).input_ids SCREAMING_SNAKE_CASE : Tuple = self.pre_processor(_snake_case , return_tensors="""pt""" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.model.generate( inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_snake_case , ).sequences def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pre_processor.batch_decode(_snake_case )[0] SCREAMING_SNAKE_CASE : Any = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" ) SCREAMING_SNAKE_CASE : str = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" ) SCREAMING_SNAKE_CASE : Optional[int] = re.sub(R"""<.*?>""" , """""" , _snake_case , count=1 ).strip() # remove first task start token 
SCREAMING_SNAKE_CASE : Optional[int] = self.pre_processor.token2json(_snake_case ) return sequence["answer"]
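A hypothetical invocation of the tool defined above, assuming the transformers agent-tool calling convention; the image path and variable names are illustrative only:

# Hypothetical usage sketch of the Donut document-QA tool above; the
# image path and names here are illustrative assumptions, not from the source.
from PIL import Image

tool = UpperCamelCase__()                      # the tool class defined above
document = Image.open("invoice.png")           # hypothetical local document image
answer = tool(document, "What is the total amount?")
print(answer)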
708
'''simple docstring'''

import math


class Graph:
    """simple docstring"""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        """simple docstring"""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """simple docstring"""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """simple docstring"""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
79
0
'''simple docstring'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    """simple docstring"""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
709
'''simple docstring'''

import math


def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""") from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
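The 6k +/- 1 trial division above rests on a small fact worth spelling out: writing n = 6q + r, the residues r in {0, 2, 4} are even and r = 3 is divisible by 3, so every prime greater than 3 satisfies n % 6 in {1, 5}. A quick check:

# Sanity check of the 6k +/- 1 fact used by is_prime above.
for p in (5, 7, 11, 13, 17, 19, 23, 29, 31, 37):
    assert p % 6 in (1, 5)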
79
0
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.dummy_uncond_unet SCREAMING_SNAKE_CASE : Tuple = KarrasVeScheduler() SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=_a , scheduler=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" ).images SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" , return_dict=_a )[0] SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE : str = UNetaDModel.from_pretrained(_a ) SCREAMING_SNAKE_CASE : int = KarrasVeScheduler() SCREAMING_SNAKE_CASE : str = KarrasVePipeline(unet=_a , scheduler=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(num_inference_steps=20 , generator=_a , output_type="""numpy""" ).images SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
710
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
0
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __UpperCAmelCase = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __UpperCAmelCase = 'main' # Default branch name __UpperCAmelCase = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2' # One particular commit (not the top of `main`) __UpperCAmelCase = 'aaaaaaa' # This commit does not exist, so we should 404. __UpperCAmelCase = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684' # Sha-1 of config.json on the top of `main`, for checking purposes __UpperCAmelCase = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3' @contextlib.contextmanager def __A ( ): """simple docstring""" print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __A ( ): """simple docstring""" print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Any ): '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int ): '''simple docstring''' with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" pass 
self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) @require_tf def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" pass self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) @require_flax def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" pass self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
711
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """Hello world! cécé herlolip""" __UpperCAmelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def convert_bertabs_checkpoints( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda storage , loc : storage ) SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outputs are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None # The original model does not apply the generator layer immediately but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference between model outputs: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference between generator outputs: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) args = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
79
0
'''simple docstring'''


def __A(input_str: str) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
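The check above packs one bit per code point into an arbitrarily large Python int. A readable restatement with worked inputs (the function name is mine, not from the source file):

# Readable restatement of the bitmask uniqueness check above; the name
# all_chars_unique is mine, not from the source file.
def all_chars_unique(s: str) -> bool:
    seen = 0
    for ch in s:
        bit = 1 << ord(ch)
        if seen & bit:  # this character's bit is already set -> repeat found
            return False
        seen |= bit
    return True


assert all_chars_unique("abcd") is True
assert all_chars_unique("abca") is False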
712
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
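The token-level F1 above is the standard SQuAD-style metric: precision and recall over the multiset intersection of normalized tokens, combined as a harmonic mean. A worked example of the arithmetic:

# Worked example of the SQuAD-style token F1 computed above.
from collections import Counter

pred = "six feet".split()           # prediction tokens after normalization
gold = "six feet under".split()     # gold tokens after normalization
num_same = sum((Counter(pred) & Counter(gold)).values())  # 2 shared tokens
precision = num_same / len(pred)    # 2/2 = 1.0
recall = num_same / len(gold)       # 2/3
f1 = 2 * precision * recall / (precision + recall)
assert abs(f1 - 0.8) < 1e-9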
79
0
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCamelCase__ ( _A ): """simple docstring""" def __init__( self : str ): '''simple docstring''' self.test() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : str = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE : Optional[int] = self.advance() if not self.does_advance(lowerCamelCase_ ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.update(lowerCamelCase_ ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Any ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : int , lowerCamelCase_ : int=False ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class UpperCamelCase__ ( _A ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : str ): '''simple docstring''' super(lowerCamelCase_ , self ).__init__() if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or len(lowerCamelCase_ ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) SCREAMING_SNAKE_CASE : List[Any] = token_ids SCREAMING_SNAKE_CASE : List[str] = len(self.token_ids ) SCREAMING_SNAKE_CASE : Optional[Any] = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : int ): '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCamelCase_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCamelCase_ )}''' ) SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Tuple = False if self.does_advance(lowerCamelCase_ ): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE : int = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Tuple = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE : List[Any] = True self.reset() return stepped, completed, reset def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = PhrasalConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : Optional[Any] = self.fulfilled_idx SCREAMING_SNAKE_CASE : List[str] = self.completed return new_constraint class UpperCamelCase__ : """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str]=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = max([len(lowerCamelCase_ ) for one in nested_token_ids] ) SCREAMING_SNAKE_CASE : Union[str, Any] = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE : Tuple = root for tidx, token_id in enumerate(lowerCamelCase_ ): if token_id not in level: SCREAMING_SNAKE_CASE : Tuple = {} SCREAMING_SNAKE_CASE : List[str] = level[token_id] if no_subsets and self.has_subsets(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) SCREAMING_SNAKE_CASE : List[Any] = root def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE : Any = start[current_token] SCREAMING_SNAKE_CASE : Union[str, Any] = list(start.keys() ) return next_tokens def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.next_tokens(lowerCamelCase_ ) return len(lowerCamelCase_ ) == 0 def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = list(root.values() ) if len(lowerCamelCase_ ) == 0: return 1 else: return sum([self.count_leaves(lowerCamelCase_ ) for nn in next_nodes] ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.count_leaves(lowerCamelCase_ ) return len(lowerCamelCase_ ) != leaf_count class UpperCamelCase__ ( _A ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Any ): '''simple docstring''' super(lowerCamelCase_ , self ).__init__() if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or len(lowerCamelCase_ ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(lowerCamelCase_ , lowerCamelCase_ ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) SCREAMING_SNAKE_CASE : Dict = DisjunctiveTrie(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = nested_token_ids SCREAMING_SNAKE_CASE : Tuple = self.trie.max_height SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : 
List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.trie.next_tokens(self.current_seq ) if len(lowerCamelCase_ ) == 0: return None else: return token_list def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCamelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCamelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Dict = False if self.does_advance(lowerCamelCase_ ): self.current_seq.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = True else: SCREAMING_SNAKE_CASE : Optional[int] = True self.reset() SCREAMING_SNAKE_CASE : Dict = self.trie.reached_leaf(self.current_seq ) SCREAMING_SNAKE_CASE : Any = completed return stepped, completed, reset def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[int] = [] def lowerCamelCase_ ( self : str ): '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : Tuple = self.current_seq SCREAMING_SNAKE_CASE : Optional[Any] = self.completed return new_constraint class UpperCamelCase__ : """simple docstring""" def __init__( self : str , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = constraints # max # of steps required to fulfill a given constraint SCREAMING_SNAKE_CASE : Any = max([c.seqlen for c in constraints] ) SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = False self.init_state() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = [constraint.copy(stateful=lowerCamelCase_ ) for constraint in self.constraints] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE : int = constraint.advance() if isinstance(lowerCamelCase_ , lowerCamelCase_ ): token_list.append(lowerCamelCase_ ) elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): token_list.extend(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.advance() if isinstance(lowerCamelCase_ , lowerCamelCase_ ): token_list.append(lowerCamelCase_ ) elif isinstance(lowerCamelCase_ , 
lowerCamelCase_ ): token_list.extend(lowerCamelCase_ ) if len(lowerCamelCase_ ) == 0: return None else: return token_list def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE : Optional[Any] = self.add(lowerCamelCase_ ) # the entire list of constraints are fulfilled if self.completed: break def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) SCREAMING_SNAKE_CASE : Optional[Any] = False, False if self.completed: SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : str = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE : Dict = self.inprogress_constraint.update(lowerCamelCase_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) SCREAMING_SNAKE_CASE : str = None if len(self.pending_constraints ) == 0: # we're done! SCREAMING_SNAKE_CASE : int = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint.update(lowerCamelCase_ ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if not complete and stepped: SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE : List[str] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE : int = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any]=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. 
if stateful: SCREAMING_SNAKE_CASE : Optional[Any] = [ constraint.copy(stateful=lowerCamelCase_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.copy(stateful=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [constraint.copy() for constraint in self.pending_constraints] return new_state
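A hedged trace of the stepped/completed/reset protocol these classes implement, assuming the phrasal constraint above corresponds to transformers' PhrasalConstraint (the name its own copy() method references):

# Hedged trace of the constraint update protocol, assuming PhrasalConstraint
# as referenced by the copy() method above.
constraint = PhrasalConstraint([5, 9, 2])
for token in (5, 9, 2):
    stepped, completed, reset = constraint.update(token)
    assert stepped and not reset
assert completed and constraint.remaining() == 0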
713
'''simple docstring'''


def factorial(digit: int) -> int:
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(
        f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
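A Krishnamurthy (strong) number equals the sum of the factorials of its digits; the classic worked case:

# Worked check: 145 = 1! + 4! + 5! = 1 + 24 + 120.
from math import factorial

assert sum(factorial(int(d)) for d in "145") == 145
assert sum(factorial(int(d)) for d in "146") != 146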
79
0
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("""utf-8"""))
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""", """w""") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners])
        raise ValueError(f'''The following runners are offline:\n{failed}''')


if __name__ == "__main__":

    def list_str(values):
        """simple docstring"""
        return values.split(""",""")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
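The same health check can be done without shelling out to curl. A hedged sketch against the same GitHub REST endpoint, assuming requests is installed:

# Hedged requests-based alternative to the curl + subprocess call above.
import requests

def offline_runner_names(token: str, repo: str = "huggingface/transformers") -> list:
    url = f"https://api.github.com/repos/{repo}/actions/runners"
    headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    payload = requests.get(url, headers=headers).json()
    return [r["name"] for r in payload.get("runners", []) if r["status"] == "offline"]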
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] ) elif isinstance(lowerCamelCase_ , (list, tuple) ): return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ ) return self.recursive_tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ ) for column_name in batch: SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] ) return batch
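# --- Hedged usage sketch: the formatter above is what `datasets` applies for
# `with_format("torch")`; per its dtype mapping, integer columns come back as
# torch.int64 and floating-point columns as torch.float32.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]}).with_format("torch")
print(ds[0]["x"].dtype)  # torch.int64
print(ds["y"].dtype)     # torch.float32 (equal-shape rows are consolidated/stacked)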
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def __A ( lowerCamelCase_=32 , lowerCamelCase_=10 , lowerCamelCase_=1_00 , lowerCamelCase_=10_26 , lowerCamelCase_=True , lowerCamelCase_="data/tokenized_stories_train_wikitext103.jbl" , lowerCamelCase_="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( UpperCamelCase__ , UpperCamelCase__ , number=UpperCamelCase__ , min_len=10_26 , trim=UpperCamelCase__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : List[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model SCREAMING_SNAKE_CASE : int = load_gpta("""gpt2""" ).to(UpperCamelCase__ ) print("""computing perplexity on objective set""" ) SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).item() print("""perplexity on objective set:""" , UpperCamelCase__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def __A ( lowerCamelCase_ , lowerCamelCase_=15 , lowerCamelCase_=1_28 , lowerCamelCase_=1_00 , lowerCamelCase_="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : Optional[int] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : List[str] = SecondaryLearner(UpperCamelCase__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( UpperCamelCase__ , UpperCamelCase__ , max_epochs=UpperCamelCase__ , batch_size=UpperCamelCase__ , eval_freq=1_00 , igf_model_path=UpperCamelCase__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=32 , lowerCamelCase_=10_00 , lowerCamelCase_=16 , lowerCamelCase_=1.0 , lowerCamelCase_=recopy_gpta , lowerCamelCase_=None , lowerCamelCase_=10 , lowerCamelCase_="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : str = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) SCREAMING_SNAKE_CASE : Dict = RandomSampler(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : str = max_steps // (len(UpperCamelCase__ )) + 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : int = torch.zeros((1, context_len) , dtype=torch.long , device=UpperCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = recopy_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) model.train() if secondary_learner is not None: secondary_learner.to(UpperCamelCase__ ) 
secondary_learner.eval() SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : List[Any] = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : Dict = compute_perplexity(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) test_perps.append(UpperCamelCase__ ) print("""Test perplexity, step""" , UpperCamelCase__ , """:""" , UpperCamelCase__ ) for epoch in range(int(UpperCamelCase__ ) ): for step, example in enumerate(UpperCamelCase__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : List[str] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : Optional[int] = secondary_learner.forward( torch.tensor(UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(UpperCamelCase__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : List[str] = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : List[Any] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : Dict = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Dict = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : int = compute_perplexity(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) test_perps.append(UpperCamelCase__ ) print("""Test perplexity, step""" , UpperCamelCase__ , """:""" , UpperCamelCase__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , UpperCamelCase__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=UpperCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=1_00 , type=UpperCamelCase__ , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=1_00 , type=UpperCamelCase__ , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=10_00 , type=UpperCamelCase__ , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=1_28 , type=UpperCamelCase__ , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=UpperCamelCase__ , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=UpperCamelCase__ , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=1_00 , type=UpperCamelCase__ , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=10_26 , type=UpperCamelCase__ , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=UpperCamelCase__ , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=UpperCamelCase__ , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=UpperCamelCase__ , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Reset the model to the 
original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=UpperCamelCase__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner SCREAMING_SNAKE_CASE : List[str] = training_secondary_learner( UpperCamelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : str = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_00 , min_len=10_26 , trim=UpperCamelCase__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=UpperCamelCase__ , secondary_learner=UpperCamelCase__ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
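# --- Illustrative toy sketch (made-up IG values) of the filtering rule in the
# training loop above: a context is kept only when the secondary learner's
# predicted information gain clears a threshold that is relaxed after 10 batches.
threshold = 1.0
kept = []
for step, predicted_q in enumerate([1.4, 0.2, 1.3, -0.5] * 4):
    if step == 10:  # mirrors the loop above: loosen the filter after warm-up
        threshold = -1
    if predicted_q >= threshold:
        kept.append(step)
print(kept)  # only high-IG contexts pass early on; nearly everything passes later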
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
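# --- Hedged usage sketch for the extractor under test: a one-second synthetic
# tone becomes a (batch, channels, time, freq) log-mel spectrogram; the time
# dimension depends on clip length (the integration test above gets (1, 1, 192, 128)).
import numpy as np
from transformers import TvltFeatureExtractor

audio = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 44_100)).astype(np.float32)
features = TvltFeatureExtractor()(audio, sampling_rate=44_100, return_tensors="np")
print(features.audio_values.shape)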
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : int = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : Dict = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( __UpperCAmelCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , movq=__SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : int = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Tuple = latents.to(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any]=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Tuple = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Tuple = 
torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=__SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. SCREAMING_SNAKE_CASE : Dict = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(__SCREAMING_SNAKE_CASE , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__SCREAMING_SNAKE_CASE ) def __call__( self : str , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self._execution_device SCREAMING_SNAKE_CASE : Dict = guidance_scale > 1.0 if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : Any = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) SCREAMING_SNAKE_CASE : str = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) SCREAMING_SNAKE_CASE : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet.config.in_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = downscale_height_and_width(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : Any = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : int = {"""image_embeds""": image_embeds} SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet( 
sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )[0] # post-processing SCREAMING_SNAKE_CASE : Tuple = self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Union[str, Any] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : Any = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Dict = self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
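# --- Worked check of downscale_height_and_width above (scale factor 8, the
# movq default): requested pixel sizes are rounded up to whole latent cells.
def _check(h, w, sf=8):
    nh, nw = h // sf**2, w // sf**2
    nh += 1 if h % sf**2 else 0
    nw += 1 if w % sf**2 else 0
    return nh * sf, nw * sf

print(_check(768, 768))  # (96, 96): the UNet denoises a 96x96 latent grid
print(_check(700, 700))  # (88, 88): 700 is not a multiple of 64, so round up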
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
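# --- Note on the lazy-import pattern above: `_LazyModule` keeps a plain
# `import transformers` cheap; `modeling_mvp` (and torch) load only when a
# model class is first accessed. Hedged sketch (requires torch installed):
import transformers

cls = transformers.MvpForConditionalGeneration  # first access triggers the real import
print(cls.__module__)  # transformers.models.mvp.modeling_mvp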
"""Harris corner detection with a configurable sensitivity constant and window size."""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the Harris sensitivity constant, conventionally 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red and the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = cv2.cvtColor(img.copy(), cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)  # Harris corner response
                if r > 0.5:  # response threshold; can be tuned
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
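# --- Numeric check of the Harris response r = det(M) - k * trace(M)^2 used in
# detect() above (toy window sums, k = 0.04):
wxx, wyy, wxy, k = 2.0, 2.0, 0.1, 0.04
det = wxx * wyy - wxy**2  # 3.99
trace = wxx + wyy         # 4.0
r = det - k * trace**2    # 3.99 - 0.64 = 3.35 > 0.5, so this window is a corner
print(r)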
'''simple docstring''' __UpperCAmelCase = [ """Audio""", """Array2D""", """Array3D""", """Array4D""", """Array5D""", """ClassLabel""", """Features""", """Sequence""", """Value""", """Image""", """Translation""", """TranslationVariableLanguages""", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
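# --- Minimal usage of the feature types re-exported above.
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
print(features["label"].str2int("pos"))  # 1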
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __UpperCAmelCase = """\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n""" __UpperCAmelCase = """\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n""" __UpperCAmelCase = """\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[List[List[str]]] , lowerCamelCase_ : List[List[str]] , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__A , hypotheses=__A , min_len=__A , max_len=__A ) }
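# --- The metric above is a thin wrapper around NLTK; the same score computed
# directly (6 matching n-grams, hypothesis total 6, reference total 10, so
# precision 1.0, recall 0.6, GLEU = min = 0.6):
from nltk.translate.gleu_score import corpus_gleu

hypotheses = ["the cat sat".split()]
references = [["the cat sat down".split()]]
print(round(corpus_gleu(list_of_references=references, hypotheses=hypotheses), 2))  # 0.6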
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : str ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[Any] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Dict = hint.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Any = self.movq.config.latent_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint} SCREAMING_SNAKE_CASE : Dict = self.unet( sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.split(latents.shape[1] , 
dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0] # post-processing SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Optional[int] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
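# --- Sketch of the classifier-free-guidance combination in the denoising loop
# above: the final noise estimate extrapolates from unconditional to conditional.
import torch

uncond, cond, guidance_scale = torch.zeros(2), torch.ones(2), 4.0
guided = uncond + guidance_scale * (cond - uncond)
print(guided)  # tensor([4., 4.]) -- a scale > 1 amplifies the conditioning signal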
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def __A ( lowerCamelCase_ ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x20000 and cp <= 0x2a6df) # or (cp >= 0x2a700 and cp <= 0x2b73f) # or (cp >= 0x2b740 and cp <= 0x2b81f) # or (cp >= 0x2b820 and cp <= 0x2ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2f800 and cp <= 0x2fa1f) # ): # return True return False def __A ( lowerCamelCase_ ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE : List[str] = ord(_lowerCamelCase ) if not _is_chinese_char(_lowerCamelCase ): return 0 return 1 def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = set() for token in tokens: SCREAMING_SNAKE_CASE : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase ) if chinese_word: word_set.add(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = list(_lowerCamelCase ) return word_list def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE : Union[str, Any] = bert_tokens SCREAMING_SNAKE_CASE : List[Any] = 0, len(_lowerCamelCase ) while start < end: SCREAMING_SNAKE_CASE : str = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE : List[Any] = min(end - start , _lowerCamelCase ) for i in range(_lowerCamelCase , 1 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE : List[Any] = """##""" + bert_word[j] SCREAMING_SNAKE_CASE : List[Any] = start + i SCREAMING_SNAKE_CASE : int = False break if single_word: start += 1 return bert_word def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] for i in range(0 , len(_lowerCamelCase ) , 1_00 ): SCREAMING_SNAKE_CASE : Tuple = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["""cws"""] ).cws SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(_lowerCamelCase ) for r in res] ltp_res.extend(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(_lowerCamelCase ) , 1_00 ): SCREAMING_SNAKE_CASE : Optional[int] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=5_12 ) bert_res.extend(res["""input_ids"""] ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = [] for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = [] for id in input_ids: SCREAMING_SNAKE_CASE : Union[str, Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase ) input_tokens.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = add_sub_symbol(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCamelCase ): if token[:2] == "##": SCREAMING_SNAKE_CASE : Optional[Any] = token[2:] # save chinese tokens' pos if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ): ref_id.append(_lowerCamelCase ) ref_ids.append(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) return ref_ids def __A ( lowerCamelCase_ ): """simple docstring""" with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE : Tuple = f.readlines() SCREAMING_SNAKE_CASE : List[Any] = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE : List[Any] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE : Dict = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE : Optional[int] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE : int = [json.dumps(_lowerCamelCase ) + """\n""" for ref in ref_ids] f.writelines(_lowerCamelCase ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) __UpperCAmelCase = parser.parse_args() main(args)
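# --- Hedged usage sketch ---
# Saved as, say, prepare_chinese_ref.py, the script above would be invoked from
# the command line; the paths below are just the argparse defaults shown above:
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt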
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __UpperCAmelCase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Tuple = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) return (out_vocab_file,)
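# --- Hedged usage sketch (assumes transformers is installed and the pretrained
# files can be downloaded) ---
# Loading the fast tokenizer above through its public transformers name and
# encoding a sentence pair, which exercises build_inputs_with_special_tokens:
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Hello world", "How are you?")
print(encoded["input_ids"])  # [CLS] seq1 [SEP] seq2 [SEP], as token ids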
'''simple docstring''' from __future__ import annotations import typing from collections.abc import Iterable import numpy as np __UpperCAmelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 __UpperCAmelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2) if __name__ == "__main__": def __A ( ): """simple docstring""" from timeit import timeit print("""Without Numpy""" ) print( timeit( """euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) ) print("""With Numpy""" ) print( timeit( """euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) ) benchmark()
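# --- Hedged usage sketch (both functions above are emitted under the same
# obfuscated name __A; the names below come from the timeit strings in the
# benchmark and assume the de-obfuscated module) ---
#   euclidean_distance([0, 0], [3, 4])        ->  5.0  (numpy path)
#   euclidean_distance_no_np([0, 0], [3, 4])  ->  5.0  (pure-python path)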
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""swish""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = get_activation("""silu""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""mish""" ) self.assertIsInstance(lowerCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_activation("""gelu""" ) self.assertIsInstance(lowerCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
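# --- Hedged usage sketch (diffusers required) ---
# get_activation maps a string key to an nn.Module instance, as the tests above
# assert for "swish", "silu", "mish" and "gelu":
import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")
print(act(torch.tensor([0.0, 20.0])))  # approximately tensor([ 0., 20.])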
from __future__ import annotations def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
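# --- Hedged usage sketch (assumes the de-obfuscated signature
# shear_stress(stress, tangential_force, area) from the original source) ---
# Exactly one of the three quantities must be zero; the function solves for it:
#   shear_stress(stress=25, tangential_force=100, area=0)    ->  ('area', 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)  ->  ('stress', 8.0)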
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''deberta-v2''' def __init__( self : int , lowerCamelCase_ : Optional[Any]=12_81_00 , lowerCamelCase_ : str=15_36 , lowerCamelCase_ : int=24 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : List[Any]=61_44 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : str=0 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Dict=1e-7 , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : Dict="gelu" , **lowerCamelCase_ : Optional[int] , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : List[Any] = relative_attention SCREAMING_SNAKE_CASE : str = max_relative_positions SCREAMING_SNAKE_CASE : int = pad_token_id SCREAMING_SNAKE_CASE : List[str] = position_biased_input # Backwards compatibility if type(lowerCamelCase_ ) == str: SCREAMING_SNAKE_CASE : Dict = [x.strip() for x in pos_att_type.lower().split("""|""" )] SCREAMING_SNAKE_CASE : Any = pos_att_type SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = kwargs.get("""pooler_hidden_size""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pooler_dropout SCREAMING_SNAKE_CASE : Any = pooler_hidden_act class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), 
("""attention_mask""", dynamic_axis)] ) @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return 12 def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional["TensorType"] = None , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : "PreTrainedTokenizerBase" = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = super().generate_dummy_inputs(preprocessor=lowerCamelCase_ , framework=lowerCamelCase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
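# --- Hedged usage sketch (transformers required) ---
# Instantiating the config above via its public transformers name and checking
# two defaults that appear in the __init__ signature (vocab 128100, hidden 1536):
from transformers import DebertaV2Config

config = DebertaV2Config()
print(config.vocab_size, config.hidden_size)  # 128100 1536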
'''simple docstring''' from typing import Any def __A ( lowerCamelCase_ ): """simple docstring""" if not input_list: return [] SCREAMING_SNAKE_CASE : Dict = [input_list.count(_lowerCamelCase ) for value in input_list] SCREAMING_SNAKE_CASE : Dict = max(_lowerCamelCase ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(_lowerCamelCase ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
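# --- Hedged usage sketch (assumes the de-obfuscated name mode, taking the
# input_list argument referenced in the body above) ---
# Ties are returned sorted; both 2 and 3 appear twice in the first example:
#   mode([1, 2, 2, 3, 3])  ->  [2, 3]
#   mode([])               ->  []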
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
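# --- Hedged usage sketch ---
# Both graph classes above are emitted under the same obfuscated name
# UpperCamelCase__, so the undirected class shadows the directed one; the calls
# below assume the original names from the source (e.g. Graph for the
# undirected class):
#   g = Graph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.bfs(1)  # -> [1, 2, 3]
#   g.dfs(1)  # -> [1, 2, 3] (depth-first visit order from 1)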
'''simple docstring''' from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
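# --- Hedged usage sketch (transformers + sentencepiece required, plus a
# download of the pretrained vocab) ---
# Loading the tokenizer above through its public transformers name:
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tokenizer("Un chat.").input_ids
print(tokenizer.decode(ids))  # roughly '<s> Un chat.</s>'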
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase__ ) ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase__ ) ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase__ ) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase__ ) ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(lowercase__ ) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : Dict = "fp16" self.assertTrue(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : str = "fp16" self.assertTrue(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] SCREAMING_SNAKE_CASE : Optional[Any] = "fp16" self.assertTrue(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", 
"unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE : Union[str, Any] = "fp16" self.assertFalse(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : Optional[Any] = "fp16" self.assertTrue(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] SCREAMING_SNAKE_CASE : str = "fp16" self.assertTrue(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : Dict = "fp16" self.assertFalse(is_safetensors_compatible(lowercase__ , variant=lowercase__ ) )
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) SCREAMING_SNAKE_CASE : Dict = { """input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 6, 7_68) ) self.assertEqual(output.shape , lowerCamelCase_ ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") __UpperCAmelCase = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) SCREAMING_SNAKE_CASE__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''The input training data file (a text file).'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if self.train_file is not None: SCREAMING_SNAKE_CASE : Tuple = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: SCREAMING_SNAKE_CASE : List[str] = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None def __call__( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 'label' if 'label' in features[0].keys() else 'labels' SCREAMING_SNAKE_CASE : str = [feature.pop(lowerCamelCase_ ) for feature in features] SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = len(features[0]["""input_ids"""] ) SCREAMING_SNAKE_CASE : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase_ )] for feature in features ] SCREAMING_SNAKE_CASE : Optional[int] = list(chain(*lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : str = self.tokenizer.pad( lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) # Un-flatten SCREAMING_SNAKE_CASE : Optional[int] = {k: v.view(lowerCamelCase_ , lowerCamelCase_ , -1 ) for k, v in batch.items()} # Add back labels SCREAMING_SNAKE_CASE : int = torch.tensor(lowerCamelCase_ , dtype=torch.intaa ) return batch def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_swag""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if data_args.train_file is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE : str = data_args.validation_file SCREAMING_SNAKE_CASE : Union[str, Any] = data_args.train_file.split(""".""" )[-1] SCREAMING_SNAKE_CASE : List[Any] = load_dataset( SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset( """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : Tuple = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. SCREAMING_SNAKE_CASE : Dict = [f'''ending{i}''' for i in range(4 )] SCREAMING_SNAKE_CASE : Any = 'sent1' SCREAMING_SNAKE_CASE : Dict = 'sent2' if data_args.max_seq_length is None: SCREAMING_SNAKE_CASE : List[str] = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value""" """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can""" """ override this default with `--block_size xxx`.""" ) SCREAMING_SNAKE_CASE : Tuple = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) SCREAMING_SNAKE_CASE : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[str] = [[context] * 4 for context in examples[context_name]] SCREAMING_SNAKE_CASE : int = examples[question_header_name] SCREAMING_SNAKE_CASE : List[str] = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ ) ] # Flatten out SCREAMING_SNAKE_CASE : Union[str, Any] = list(chain(*SCREAMING_SNAKE_CASE_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = list(chain(*SCREAMING_SNAKE_CASE_ ) ) # Tokenize SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) SCREAMING_SNAKE_CASE : Dict = raw_datasets['train'] if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE : List[str] = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE : str = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): SCREAMING_SNAKE_CASE : int = train_dataset.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) SCREAMING_SNAKE_CASE : Any = raw_datasets['validation'] if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE : Optional[int] = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE : Dict = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): SCREAMING_SNAKE_CASE : str = eval_dataset.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator SCREAMING_SNAKE_CASE : List[str] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = eval_predictions SCREAMING_SNAKE_CASE : Tuple = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer SCREAMING_SNAKE_CASE : Optional[int] = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE : str = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE : str = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = last_checkpoint SCREAMING_SNAKE_CASE : Optional[Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) trainer.save_model() 
# Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE : Tuple = train_result.metrics SCREAMING_SNAKE_CASE : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("""train""" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("""train""" , SCREAMING_SNAKE_CASE_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : List[str] = trainer.evaluate() SCREAMING_SNAKE_CASE : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE : Optional[int] = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE : Any = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE_ ) def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
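Editor's note: the collator defined earlier in this snippet flattens each example's four answer candidates into one long batch, pads it, then reshapes back to (batch, choices, seq_len). A minimal, dependency-free sketch of that flatten/pad/un-flatten pattern follows; the toy `pad` helper is an invented stand-in for `tokenizer.pad`, not part of the original script.

from itertools import chain

def pad(seqs, pad_id=0):
    # Pad every sequence to the longest one (stand-in for tokenizer.pad).
    width = max(len(s) for s in seqs)
    return [s + [pad_id] * (width - len(s)) for s in seqs]

features = [  # two examples, four candidate endings each
    {"input_ids": [[1, 2], [1, 2, 3], [1], [1, 2]]},
    {"input_ids": [[4], [4, 5], [4, 5, 6], [4]]},
]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])
flat = list(chain(*(f["input_ids"] for f in features)))  # (batch * choices) rows
padded = pad(flat)                                       # rectangular matrix
batch = [padded[i * num_choices : (i + 1) * num_choices] for i in range(batch_size)]
assert len(batch) == batch_size and all(len(row) == num_choices for row in batch)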
703
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''Translation''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None def __call__( self : Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages ) if self.languages and set(lowerCamelCase_ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE : List[Any] = [] for lang, text in translation_dict.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
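Editor's note: a pure-Python sketch of what the variable-language encoder above computes. A translation dict, possibly with several strings per language, becomes two aligned tuples sorted by language code; this re-implementation is for illustration only.

translation = {"fr": ["Bonjour", "Salut"], "de": "Hallo"}
pairs = []
for lang, text in translation.items():
    if isinstance(text, str):
        pairs.append((lang, text))               # single translation
    else:
        pairs.extend((lang, t) for t in text)    # one tuple per alternative
languages, translations = zip(*sorted(pairs))
assert languages == ("de", "fr", "fr")
assert translations == ("Hallo", "Bonjour", "Salut")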
79
0
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase__ ( __A ): """simple docstring""" pass class UpperCamelCase__ : """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = data SCREAMING_SNAKE_CASE : Node | None = None def __iter__( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self SCREAMING_SNAKE_CASE : str = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data SCREAMING_SNAKE_CASE : int = node.next_node @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": __UpperCAmelCase = Node(1) __UpperCAmelCase = Node(2) __UpperCAmelCase = Node(3) __UpperCAmelCase = Node(4) print(root_node.has_loop) # False __UpperCAmelCase = root_node.next_node print(root_node.has_loop) # True __UpperCAmelCase = Node(5) __UpperCAmelCase = Node(6) __UpperCAmelCase = Node(5) __UpperCAmelCase = Node(6) print(root_node.has_loop) # False __UpperCAmelCase = Node(1) print(root_node.has_loop) # False
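Editor's note: the `has_loop` property above records every visited node in a list, which costs O(n) extra memory and O(n^2) time for the membership checks. Floyd's tortoise-and-hare algorithm, a different technique and not part of this snippet, finds the same cycles in O(1) extra memory; a standalone sketch with its own tiny node class:

class _Node:
    def __init__(self, data):
        self.data = data
        self.next_node = None

def has_cycle(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node             # advances one step
        fast = fast.next_node.next_node   # advances two steps
        if slow is fast:                  # the pointers meet only inside a cycle
            return True
    return False

a, b = _Node(1), _Node(2)
a.next_node = b
assert not has_cycle(a)
b.next_node = a                           # close a two-node loop
assert has_cycle(a)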
704
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
79
0
'''simple docstring''' import math def __A ( lowerCamelCase_ = 1_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = sum(i * i for i in range(1 , n + 1 ) ) SCREAMING_SNAKE_CASE : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
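Editor's note: a quick arithmetic check of the Project Euler identity computed above, using the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.

n = 10
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6   # 385
square_of_sum = (n * (n + 1) // 2) ** 2           # 55**2 = 3025
assert square_of_sum - sum_of_squares == 2640     # agrees with the brute force above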
705
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return default_hp_space_wandb(lowerCamelCase_ ) __UpperCAmelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name if len(lowerCamelCase_ ) > 1: logger.info( f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
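Editor's note: an illustrative sketch of the selection pattern the default-backend function above implements: probe a registry in order and take the first backend whose `is_available()` returns true. The `FakeOptuna`/`FakeRay` classes are stand-ins invented for this sketch.

class FakeOptuna:
    name = "optuna"
    @staticmethod
    def is_available():
        return False  # pretend the package is not installed

class FakeRay:
    name = "ray"
    @staticmethod
    def is_available():
        return True

registry = [FakeOptuna, FakeRay]
available = [b for b in registry if b.is_available()]
if not available:
    raise RuntimeError("No hyperparameter search backend available.")
print(available[0].name)  # -> "ray"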
79
0
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = DownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ResnetDownsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : List[str] = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SimpleCrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=snake_case__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : Tuple = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=snake_case__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnSkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' @property def 
lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=snake_case__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = DownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=snake_case__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = { """in_channels""": 32, """out_channels""": 32, } SCREAMING_SNAKE_CASE : str = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnDownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''down''' @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return super().get_dummy_input(include_temb=snake_case__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = { """in_channels""": 32, """out_channels""": 32, } SCREAMING_SNAKE_CASE : Dict = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = UNetMidBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''mid''' def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = { """in_channels""": 32, """temb_channels""": 1_28, } SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = UNetMidBlockaDCrossAttn # noqa F405 SCREAMING_SNAKE_CASE__ = '''mid''' def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : Union[str, Any] = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = UNetMidBlockaDSimpleCrossAttn # noqa F405 SCREAMING_SNAKE_CASE__ = '''mid''' @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=snake_case__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE, 
SCREAMING_SNAKE_CASE : str = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[Any] = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = UpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ResnetUpsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SimpleCrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ , include_encoder_hidden_states=snake_case__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = super().prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 
0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnSkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = UpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return super().get_dummy_input(include_temb=snake_case__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {"""in_channels""": 32, """out_channels""": 32} SCREAMING_SNAKE_CASE : Dict = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137] super().test_output(snake_case__ ) class UpperCamelCase__ ( __a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AttnUpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ = '''up''' @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_temb=snake_case__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = {"""in_channels""": 32, """out_channels""": 32} SCREAMING_SNAKE_CASE : Any = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568] super().test_output(snake_case__ )
706
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
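Editor's note: a numeric check of the bounding-box arithmetic in `update_image_and_anno` above. YOLO-format centers are normalized to [0, 1], so a horizontal mirror maps x_center to 1 - x_center and a vertical one maps y_center to 1 - y_center, while widths and heights are unchanged. No OpenCV is needed for the numbers:

import math

cls_id, x, y, w, h = 0, 0.25, 0.60, 0.10, 0.20  # (class, x_center, y_center, w, h)
h_flipped = (cls_id, 1 - x, y, w, h)            # flip_type == 1 (horizontal)
v_flipped = (cls_id, x, 1 - y, w, h)            # flip_type == 0 (vertical)
assert math.isclose(h_flipped[1], 0.75) and math.isclose(v_flipped[2], 0.40)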
79
0
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __UpperCAmelCase = 10 def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" for i in range(lowerCAmelCase_ , lowerCAmelCase_ ): if array[i] == target: return i return -1 def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 0 SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase_ ) while left <= right: if right - left < precision: return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE : Optional[int] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: SCREAMING_SNAKE_CASE : int = one_third - 1 elif array[two_third] < target: SCREAMING_SNAKE_CASE : Any = two_third + 1 else: SCREAMING_SNAKE_CASE : int = one_third + 1 SCREAMING_SNAKE_CASE : Dict = two_third - 1 else: return -1 def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if left < right: if right - left < precision: return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE : Any = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = input("""Enter numbers separated by comma:\n""").strip() __UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), f"List must be ordered.\n{collection}." __UpperCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip()) __UpperCAmelCase = ite_ternary_search(collection, target) __UpperCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'''Iterative search: {target} found at positions: {resulta}''') print(f'''Recursive search: {target} found at positions: {resulta}''') else: print("""Not found""")
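Editor's note: a compact editorial re-implementation of the search above (not the script's own API) that makes the two cut points explicit. Each step discards at least a third of the remaining range, so the depth is O(log base 3 of n).

def ternary_search(arr, target):
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        one = lo + (hi - lo) // 3      # one-third point
        two = hi - (hi - lo) // 3      # two-thirds point
        if arr[one] == target:
            return one
        if arr[two] == target:
            return two
        if target < arr[one]:
            hi = one - 1               # keep the left third
        elif target > arr[two]:
            lo = two + 1               # keep the right third
        else:
            lo, hi = one + 1, two - 1  # keep the middle third
    return -1

data = list(range(0, 300, 3))          # sorted input, as the algorithm requires
assert ternary_search(data, 99) == 33
assert ternary_search(data, 100) == -1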
707
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias super().__init__(**lowerCamelCase_ )
79
0
'''simple docstring''' def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = len(_snake_case ) + 1 SCREAMING_SNAKE_CASE : List[str] = len(_snake_case ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. SCREAMING_SNAKE_CASE : Optional[Any] = [[0 for i in range(_snake_case )] for j in range(_snake_case )] # since string of zero length match pattern of zero length SCREAMING_SNAKE_CASE : str = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _snake_case ): SCREAMING_SNAKE_CASE : Optional[Any] = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _snake_case ): SCREAMING_SNAKE_CASE : List[Any] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _snake_case ): for j in range(1 , _snake_case ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": SCREAMING_SNAKE_CASE : Dict = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: SCREAMING_SNAKE_CASE : Dict = 1 elif pattern[j - 2] in (input_string[i - 1], "."): SCREAMING_SNAKE_CASE : Any = dp[i - 1][j] else: SCREAMING_SNAKE_CASE : Dict = 0 else: SCREAMING_SNAKE_CASE : Tuple = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") __UpperCAmelCase = """aab""" __UpperCAmelCase = """c*a*b""" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f'''{input_string} matches the given pattern {pattern}''') else: print(f'''{input_string} does not match with the given pattern {pattern}''')
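Editor's note: a hand trace of the recurrence above on the example inputs "aab" and "c*a*b" (dp[i][j] == 1 iff the first i input characters match the first j pattern characters), followed by a cross-check against Python's own `re` module.

# dp[0][0] = 1              (empty matches empty)
# dp[0][2] = dp[0][0] = 1   ("c*" may consume zero characters)
# dp[0][4] = dp[0][2] = 1   ("a*" may consume zero characters)
# dp[1][4] = dp[0][4] = 1   (first "a" consumed by "a*")
# dp[2][4] = dp[1][4] = 1   (second "a" consumed by "a*")
# dp[3][5] = dp[2][4] = 1   (final "b" matches "b")  -> match
import re

assert re.fullmatch("c*a*b", "aab") is not None
assert re.fullmatch("c*a*b", "ab") is not None
assert re.fullmatch("c*a*b", "ba") is None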
708
'''simple docstring''' import math class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE : Any = n SCREAMING_SNAKE_CASE : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = w def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": __UpperCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
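Editor's note: a worked example of the relaxation dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) that the class above applies, on a three-node graph with edges 0->1 (4), 1->2 (1) and 2->0 (2).

INF = float("inf")
dist = [
    [0, 4, INF],
    [INF, 0, 1],
    [2, INF, 0],
]
n = len(dist)
for k in range(n):
    for i in range(n):
        for j in range(n):
            dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
assert dist[0][2] == 5  # 0 -> 1 -> 2 replaces the missing direct edge
assert dist[1][0] == 3  # 1 -> 2 -> 0
assert dist[2][1] == 6  # 2 -> 0 -> 1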
79
0
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version __UpperCAmelCase = version.parse(importlib_metadata.version("""nltk""")) if NLTK_VERSION >= version.Version("""3.6.4"""): from nltk import word_tokenize __UpperCAmelCase = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' __UpperCAmelCase = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' __UpperCAmelCase = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[ """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""", """https://en.wikipedia.org/wiki/METEOR""", ] , ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] ): '''simple docstring''' import nltk nltk.download("""wordnet""" ) if NLTK_VERSION >= version.Version("""3.6.5""" ): nltk.download("""punkt""" ) if NLTK_VERSION >= version.Version("""3.6.6""" ): nltk.download("""omw-1.4""" ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=0.9 , lowerCamelCase_ : int=3 , lowerCamelCase_ : Optional[Any]=0.5 ): '''simple docstring''' if NLTK_VERSION >= version.Version("""3.6.5""" ): SCREAMING_SNAKE_CASE : Union[str, Any] = [ meteor_score.single_meteor_score( word_tokenize(_SCREAMING_SNAKE_CASE ) , word_tokenize(_SCREAMING_SNAKE_CASE ) , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE ) for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] else: SCREAMING_SNAKE_CASE : Tuple = [ meteor_score.single_meteor_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE ) for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] return {"meteor": np.mean(_SCREAMING_SNAKE_CASE )}
709
'''simple docstring''' import math def __A ( lowerCamelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __A ( lowerCamelCase_ = 1_00_01 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE : list[int] = [] SCREAMING_SNAKE_CASE : Dict = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
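Editor's note: why the trial division above may step by 6. Every integer has the form 6k, 6k+1, ..., 6k+5; the forms 6k, 6k+2 and 6k+4 are even and 6k+3 is divisible by 3, so once 2 and 3 are handled only 6k +/- 1 candidates remain. A quick cross-check of that logic against a naive sieve:

def naive_primes(limit):
    # Sieve of Eratosthenes, used only as a reference implementation.
    sieve = [True] * (limit + 1)
    sieve[0:2] = [False, False]
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            sieve[p * p :: p] = [False] * len(sieve[p * p :: p])
    return [i for i, ok in enumerate(sieve) if ok]

def is_prime_6k(n):
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:                 # test 6k - 1 and 6k + 1 only
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert [n for n in range(100) if is_prime_6k(n)] == naive_primes(99)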
79
0
import math import tensorflow as tf from packaging import version def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor(A_ ) SCREAMING_SNAKE_CASE : Tuple = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(A_ ) SCREAMING_SNAKE_CASE : Tuple = tf.cast(math.pi , x.dtype ) SCREAMING_SNAKE_CASE : List[str] = tf.cast(0.044_715 , x.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(A_ , 3 )) )) return x * cdf def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor(A_ ) return x * tf.tanh(tf.math.softplus(A_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(A_ ) SCREAMING_SNAKE_CASE : Dict = tf.cast(0.044_715 , x.dtype ) SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor(A_ ) SCREAMING_SNAKE_CASE : str = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def __A ( lowerCamelCase_ ): """simple docstring""" return tf.clip_by_value(_gelu(A_ ) , -10 , 10 ) def __A ( lowerCamelCase_ , lowerCamelCase_=-1 ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = tf.split(A_ , 2 , axis=A_ ) return a * tf.math.sigmoid(A_ ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def __A ( lowerCamelCase_ ): """simple docstring""" return tf.keras.activations.gelu(A_ , approximate=A_ ) __UpperCAmelCase = tf.keras.activations.gelu __UpperCAmelCase = approximate_gelu_wrap else: __UpperCAmelCase = _gelu __UpperCAmelCase = _gelu_new __UpperCAmelCase = { 'gelu': gelu, 'gelu_10': gelu_aa, 'gelu_fast': gelu_fast, 'gelu_new': gelu_new, 'glu': glu, 'mish': mish, 'quick_gelu': quick_gelu, 'relu': tf.keras.activations.relu, 'sigmoid': tf.keras.activations.sigmoid, 'silu': tf.keras.activations.swish, 'swish': tf.keras.activations.swish, 'tanh': tf.keras.activations.tanh, } def __A ( lowerCamelCase_ ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
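Editor's note: a sanity check of the two GELU variants defined above, written in plain Python so it runs without TensorFlow. The exact form uses the Gaussian CDF via erf; the "fast"/"new" form uses the tanh approximation; they agree to roughly 1e-3 over typical inputs.

import math

def gelu_exact(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_exact(x) - gelu_tanh(x)) < 1e-3, (x, gelu_exact(x), gelu_tanh(x))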
710
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
0
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } __UpperCAmelCase = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" for attribute in key.split(""".""" ): SCREAMING_SNAKE_CASE : List[str] = getattr(lowerCamelCase_ , lowerCamelCase_ ) if weight_type is not None: SCREAMING_SNAKE_CASE : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape else: SCREAMING_SNAKE_CASE : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": SCREAMING_SNAKE_CASE : Dict = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE : List[Any] = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE : Any = value elif weight_type == "bias": SCREAMING_SNAKE_CASE : int = value else: SCREAMING_SNAKE_CASE : int = value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[int] = fairseq_model.state_dict() SCREAMING_SNAKE_CASE : Optional[int] = hf_model.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE : List[str] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == """group""" , ) SCREAMING_SNAKE_CASE : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: SCREAMING_SNAKE_CASE : List[Any] = True if "*" in mapped_key: SCREAMING_SNAKE_CASE : str = name.split(lowerCamelCase_ )[0].split(""".""" )[-2] SCREAMING_SNAKE_CASE : Any = mapped_key.replace("""*""" , lowerCamelCase_ ) if "weight_g" in name: SCREAMING_SNAKE_CASE : Dict = 'weight_g' elif "weight_v" in name: SCREAMING_SNAKE_CASE : Optional[int] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: SCREAMING_SNAKE_CASE : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj SCREAMING_SNAKE_CASE : Union[str, Any] = 'weight' else: SCREAMING_SNAKE_CASE : List[Any] = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) continue if not is_used: unused_weights.append(lowerCamelCase_ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = full_name.split("""conv_layers.""" )[-1] SCREAMING_SNAKE_CASE : int = name.split(""".""" ) SCREAMING_SNAKE_CASE : Tuple = int(items[0] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) SCREAMING_SNAKE_CASE : int = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) SCREAMING_SNAKE_CASE : Tuple = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
            )
            SCREAMING_SNAKE_CASE : Tuple = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            SCREAMING_SNAKE_CASE : int = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(lowerCamelCase_ )


@torch.no_grad()
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE : str = WavLMConfigOrig(checkpoint["""cfg"""] )
    SCREAMING_SNAKE_CASE : Optional[int] = WavLMOrig(lowerCamelCase_ )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()

    if config_path is not None:
        SCREAMING_SNAKE_CASE : Tuple = WavLMConfig.from_pretrained(lowerCamelCase_ )
    else:
        SCREAMING_SNAKE_CASE : Any = WavLMConfig()

    SCREAMING_SNAKE_CASE : Optional[int] = WavLMModel(lowerCamelCase_ )

    recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ )

    hf_wavlm.save_pretrained(lowerCamelCase_ )


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    __UpperCAmelCase = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
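A minimal, self-contained sketch of the wildcard remapping the script above performs when a MAPPING value contains "*"; the checkpoint key used here is hypothetical, not taken from a real WavLM checkpoint:

# Hedged sketch: one entry of the MAPPING table, with the "*" wildcard
# filled in from the layer index embedded in the fairseq-style name.
EXAMPLE_MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def remap(name):
    for key, mapped_key in EXAMPLE_MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return name


# hypothetical fairseq-style key, used only to exercise the substitution
assert remap("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"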
711
'''simple docstring'''

import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
__UpperCAmelCase = logging.getLogger(__name__)

__UpperCAmelCase = """Hello world! cécé herlolip"""

__UpperCAmelCase = namedtuple(
    """BertAbsConfig""",
    [
        """temp_dir""",
        """large""",
        """use_bert_emb""",
        """finetune_bert""",
        """encoder""",
        """share_emb""",
        """max_pos""",
        """enc_layers""",
        """enc_hidden_size""",
        """enc_heads""",
        """enc_ff_size""",
        """enc_dropout""",
        """dec_layers""",
        """dec_hidden_size""",
        """dec_heads""",
        """dec_ff_size""",
        """dec_dropout""",
    ],
)


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig(
        temp_dir=""".""" ,
        finetune_bert=lowerCamelCase_ ,
        large=lowerCamelCase_ ,
        share_emb=lowerCamelCase_ ,
        use_bert_emb=lowerCamelCase_ ,
        encoder="""bert""" ,
        max_pos=5_12 ,
        enc_layers=6 ,
        enc_hidden_size=5_12 ,
        enc_heads=8 ,
        enc_ff_size=5_12 ,
        enc_dropout=0.2 ,
        dec_layers=6 ,
        dec_hidden_size=7_68 ,
        dec_heads=8 ,
        dec_ff_size=20_48 ,
        dec_dropout=0.2 ,
    )
    SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda storage , loc : storage )
    SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ )
    original.eval()

    SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) )
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("""convert the model""" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("""Make sure that the models' outputs are identical""" )
    SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" )

    # prepare the model inputs
    SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
    SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
    SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
    SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0

    # forward pass
    SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids
    SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids
    SCREAMING_SNAKE_CASE : List[str] = None
    SCREAMING_SNAKE_CASE : Tuple = None
    SCREAMING_SNAKE_CASE : int = None
    SCREAMING_SNAKE_CASE : List[Any] = None
    SCREAMING_SNAKE_CASE : Optional[int] = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
    SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE : List[Any] = new_model(
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
    SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ )

    SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between model outputs: {:.2f}""".format(lowerCamelCase_ ) )
    SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between generator outputs: {:.2f}""".format(lowerCamelCase_ ) )

    SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
    if are_identical:
        logging.info("""all outputs are equal up to 1e-3""" )
    else:
        raise ValueError("""the outputs are different. The new model is likely different from the original one.""" )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("""saving the model's state dictionary""" )
    torch.save(
        new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"""
    )


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument(
        """--bertabs_checkpoint_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the official PyTorch dump.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the output PyTorch model.""",
    )
    __UpperCAmelCase = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
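The output-equivalence check above follows a reusable pattern; here is a hedged, self-contained sketch of it with stand-in Linear modules instead of the real summarizers:

import torch

# Stand-in modules only: `original` and `converted` are plain Linear layers,
# not the real models; the comparison pattern is what this illustrates.
original = torch.nn.Linear(4, 4)
converted = torch.nn.Linear(4, 4)
converted.load_state_dict(original.state_dict())  # mirror the weights

x = torch.randn(2, 4)
max_abs_diff = torch.max(torch.abs(original(x) - converted(x))).item()
print("Maximum absolute difference between outputs: {:.2e}".format(max_abs_diff))
assert torch.allclose(original(x), converted(x), atol=1e-3)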
79
0
'''simple docstring''' from __future__ import annotations from collections import namedtuple def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = namedtuple("""result""" , """name value""" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("""Only one argument must be 0""" ) elif power < 0: raise ValueError( """Power cannot be negative in any electrical/electronics system""" ) elif voltage == 0: return result("""voltage""" , power / current ) elif current == 0: return result("""current""" , power / voltage ) elif power == 0: return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
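A short worked check of the relation the solver above inverts (P = V * I); plain arithmetic, independent of the namedtuple wrapper:

# With V = 2 V and I = 2.5 A the power is 5 W; fixing any two of the three
# quantities determines the third, which is exactly what the solver returns.
V, I = 2.0, 2.5
P = V * I
assert P == 5.0
assert P / I == V and P / V == I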
712
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
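The token-overlap F1 computed above can be illustrated in isolation; this hedged sketch skips the lowercasing/punctuation normalization step:

from collections import Counter


def token_f1(prediction, reference):
    pred_tokens, ref_tokens = prediction.split(), reference.split()
    common = Counter(pred_tokens) & Counter(ref_tokens)  # multiset overlap
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)


print(token_f1("the cat sat", "a cat sat down"))  # 2 shared tokens -> ~0.571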
79
0
'''simple docstring''' from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [randint(-10_00 , 10_00 ) for i in range(10 )] SCREAMING_SNAKE_CASE : Union[str, Any] = randint(-50_00 , 50_00 ) return (arr, r) __UpperCAmelCase = make_dataset() def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" for triplet in permutations(A__ , 3 ): if sum(A__ ) == target: return tuple(sorted(A__ ) ) return (0, 0, 0) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" arr.sort() SCREAMING_SNAKE_CASE : Dict = len(A__ ) for i in range(n - 1 ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ SCREAMING_SNAKE_CASE : Union[str, Any] = """ triplet_sum1(*dataset) """ SCREAMING_SNAKE_CASE : Optional[int] = """ triplet_sum2(*dataset) """ SCREAMING_SNAKE_CASE : Tuple = repeat(setup=A__ , stmt=A__ , repeat=5 , number=1_00_00 ) SCREAMING_SNAKE_CASE : Optional[int] = repeat(setup=A__ , stmt=A__ , repeat=5 , number=1_00_00 ) return (min(A__ ), min(A__ )) if __name__ == "__main__": from doctest import testmod testmod() __UpperCAmelCase = solution_times() print(f'''The time for naive implementation is {times[0]}.''') print(f'''The time for optimized implementation is {times[1]}.''')
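A hedged usage sketch of the two strategies benchmarked above, brute-force permutations versus sort-plus-two-pointers, on a small hand-picked input:

from itertools import permutations

# brute force: first permutation that hits the target, returned sorted
arr, target = [12, 3, 1, 2, -6, 5, -8, 6], 0
brute = next((tuple(sorted(t)) for t in permutations(arr, 3) if sum(t) == target), (0, 0, 0))

# sort + two pointers: O(n^2) instead of O(n^3)
s, two_ptr = sorted(arr), (0, 0, 0)
for i in range(len(s) - 2):
    left, right = i + 1, len(s) - 1
    while left < right:
        total = s[i] + s[left] + s[right]
        if total == target:
            two_ptr = (s[i], s[left], s[right])
            break
        if total < target:
            left += 1
        else:
            right -= 1
    if two_ptr != (0, 0, 0):
        break

print(brute, two_ptr)  # e.g. (-8, 3, 5) and (-8, 2, 6): both sum to 0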
713
'''simple docstring'''


def factorial(digit):
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(
        f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
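A worked check of the property being tested: 145 is a Krishnamurthy number because the factorials of its digits sum back to the number itself:

# 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 passes the check above
assert 1 + 24 + 120 == 145
# 40 fails it: 4! + 0! = 24 + 1 = 25, which is not 40
assert 24 + 1 != 40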
79
0
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) SCREAMING_SNAKE_CASE : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids SCREAMING_SNAKE_CASE : Dict = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(lowerCamelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id ) SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits SCREAMING_SNAKE_CASE : Optional[int] = optax.softmax_cross_entropy(lowerCamelCase_ , onehot(lowerCamelCase_ , logits.shape[-1] ) ).mean() SCREAMING_SNAKE_CASE : List[Any] = -(labels.shape[-1] * loss.item()) SCREAMING_SNAKE_CASE : int = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
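The score computed in the test above is the mean token cross-entropy scaled by sequence length and negated, i.e. the sequence log-likelihood; a hedged toy illustration with hand-written log-probabilities rather than real MT5 logits:

import numpy as np

log_probs = np.log(np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]))  # toy, already normalized
labels = np.array([0, 1])
token_nll = -log_probs[np.arange(len(labels)), labels]            # per-token cross-entropy
score = -(len(labels) * token_nll.mean())                         # = sum of label log-probs
assert np.isclose(score, np.log(0.7) + np.log(0.8))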
714
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
        if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ):
            SCREAMING_SNAKE_CASE : Dict = data_struct.__array__()

        # support for nested types like struct of list of struct
        if isinstance(lowerCamelCase_ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(lowerCamelCase_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(lowerCamelCase_ )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ )
        return self.recursive_tensorize(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] )
        SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ )
        return column

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ )
        for column_name in batch:
            SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] )
        return batch
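A hedged usage sketch of where this formatter surfaces in practice: requesting the torch format on a `datasets.Dataset` makes rows come back as tensors, with integer columns mapped to int64 and floats to float32 per the dtype rules above:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
row = ds[0]
print(type(row["x"]), row["x"].dtype)  # <class 'torch.Tensor'> torch.int64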
79
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''longformer''' def __init__( self : Dict , lowerCamelCase_ : Union[List[int], int] = 5_12 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 3_05_22 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 30_72 , lowerCamelCase_ : str = "gelu" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 1e-12 , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , **lowercase_ ) SCREAMING_SNAKE_CASE : str = attention_window SCREAMING_SNAKE_CASE : List[Any] = sep_token_id SCREAMING_SNAKE_CASE : List[str] = bos_token_id SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id SCREAMING_SNAKE_CASE : Tuple = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Any = max_position_embeddings SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Dict = layer_norm_eps SCREAMING_SNAKE_CASE : Dict = onnx_export class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : "PretrainedConfig" , lowerCamelCase_ : str = "default" , lowerCamelCase_ : "List[PatchingSpec]" = None ): '''simple docstring''' super().__init__(lowercase_ , lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Dict = True @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", 
dynamic_axis), ("""global_attention_mask""", dynamic_axis), ] ) @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = super().outputs if self.task == "default": SCREAMING_SNAKE_CASE : str = {0: """batch"""} return outputs @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 1e-4 @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return max(super().default_onnx_opset , 14 ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : "PreTrainedTokenizerBase" , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = super().generate_dummy_inputs( preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly SCREAMING_SNAKE_CASE : Dict = torch.zeros_like(inputs["""input_ids"""] ) # make every second token global SCREAMING_SNAKE_CASE : Any = 1 return inputs
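A hedged sketch of the dummy-input trick in `generate_dummy_inputs` above: start from an all-zero (all-local) mask shaped like `input_ids` and mark every second position as global:

import torch

input_ids = torch.randint(0, 30522, (2, 8))      # dummy batch; vocab size assumed
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                # make every second token global
print(global_attention_mask[0])                  # tensor([1, 0, 1, 0, 1, 0, 1, 0])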
715
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
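A hedged illustration of the shape contract the assertions above enforce; the constants are placeholders, not values read from a real extractor:

import numpy as np

# placeholder constants standing in for the extractor's configuration
batch, num_audio_channels, spectrogram_length, feature_size = 3, 1, 192, 128
encoded = np.zeros((batch, num_audio_channels, spectrogram_length, feature_size))
assert encoded.ndim == 4                        # (batch, channels, time, mel bins)
assert encoded.shape[-1] == feature_size
assert encoded.shape[-2] <= spectrogram_length  # time axis is capped
assert encoded.shape[-3] == num_audio_channels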
79
0
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __UpperCAmelCase = 50003 __UpperCAmelCase = 50002 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = PLBartTokenizer SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[Any] = PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = [tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) SCREAMING_SNAKE_CASE : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowerCamelCase_ ( 
self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.vocab_size SCREAMING_SNAKE_CASE : Any = [tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) SCREAMING_SNAKE_CASE : List[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" SCREAMING_SNAKE_CASE : Dict = tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''uclanlp/plbart-python-en_XX''' SCREAMING_SNAKE_CASE__ = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] SCREAMING_SNAKE_CASE__ = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] SCREAMING_SNAKE_CASE__ = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowerCamelCase_ ( cls : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) SCREAMING_SNAKE_CASE : Optional[Any] = 1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 
5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE : Union[str, Any] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = 10 SCREAMING_SNAKE_CASE : str = self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Any = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) 
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : List[str] = targets["""input_ids"""] SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
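A hedged usage sketch of the translation setup these tests exercise; it needs network access to fetch the real checkpoint, so it is illustrative rather than part of the test suite:

from transformers import PLBartTokenizer

tok = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
)
batch = tok(
    "def add(a,b):NEW_LINE_INDENTreturn a+b",  # made-up source snippet
    text_target="Adds a and b.",
    return_tensors="pt",
)
print(batch.input_ids.shape, batch.labels.shape)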
716
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
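A hedged, stripped-down sketch of the lazy-import pattern `_LazyModule` implements (the real class also handles module files and specs); attribute access, not import time, is what triggers the heavy import:

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Toy stand-in for _LazyModule: defers submodule imports until needed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                real = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(real, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")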
79
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json', } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 'nllb-moe' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[str] , lowerCamelCase_ : Dict=12_81_12 , lowerCamelCase_ : Any=10_24 , lowerCamelCase_ : str=12 , lowerCamelCase_ : Union[str, Any]=40_96 , lowerCamelCase_ : Tuple=16 , lowerCamelCase_ : Union[str, Any]=12 , lowerCamelCase_ : int=40_96 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : List[Any]=0.05 , lowerCamelCase_ : Dict=0.05 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : Union[str, Any]=10_24 , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Any=False , lowerCamelCase_ : Union[str, Any]="float32" , lowerCamelCase_ : int=False , lowerCamelCase_ : List[Any]=1_28 , lowerCamelCase_ : Optional[int]=64 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Optional[Any]=0.001 , lowerCamelCase_ : Tuple=0.001 , lowerCamelCase_ : Tuple="all" , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : int=False , lowerCamelCase_ : List[Any]=1.0 , lowerCamelCase_ : List[str]=0.2 , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Tuple=0 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=False , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = vocab_size SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE : List[Any] = d_model SCREAMING_SNAKE_CASE : List[Any] = encoder_ffn_dim SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers SCREAMING_SNAKE_CASE : List[Any] = encoder_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim SCREAMING_SNAKE_CASE : str = decoder_layers SCREAMING_SNAKE_CASE : Optional[int] = decoder_attention_heads SCREAMING_SNAKE_CASE : str = dropout SCREAMING_SNAKE_CASE : Any = attention_dropout SCREAMING_SNAKE_CASE : Any = activation_dropout SCREAMING_SNAKE_CASE : List[Any] = activation_function SCREAMING_SNAKE_CASE : Any = init_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Dict = decoder_layerdrop SCREAMING_SNAKE_CASE : Any = use_cache SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE : List[str] = router_z_loss_coef SCREAMING_SNAKE_CASE : str = router_aux_loss_coef SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_sparse_step SCREAMING_SNAKE_CASE : Optional[int] = encoder_sparse_step SCREAMING_SNAKE_CASE : int = num_experts SCREAMING_SNAKE_CASE : int = expert_capacity SCREAMING_SNAKE_CASE : int = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens SCREAMING_SNAKE_CASE : 
str = batch_prioritized_routing SCREAMING_SNAKE_CASE : Optional[Any] = second_expert_policy SCREAMING_SNAKE_CASE : Dict = normalize_router_prob_before_dropping SCREAMING_SNAKE_CASE : Optional[int] = moe_eval_capacity_token_fraction SCREAMING_SNAKE_CASE : Tuple = moe_token_dropout SCREAMING_SNAKE_CASE : List[Any] = output_router_logits super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
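A hedged sketch of instantiating the config and exercising the `router_dtype` validation above; the sizes are made up and far smaller than the released 54B checkpoint, and it assumes a transformers version that ships NLLB-MoE:

from transformers import NllbMoeConfig

# small, made-up sizes; nothing here matches the released 54B checkpoint
cfg = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4)
print(cfg.router_dtype)  # 'float32' by default

try:
    NllbMoeConfig(router_dtype="int8")  # rejected by the validation above
except ValueError as err:
    print(err)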
717
'''simple docstring''' __UpperCAmelCase = [ """Audio""", """Array2D""", """Array3D""", """Array4D""", """Array5D""", """ClassLabel""", """Features""", """Sequence""", """Value""", """Image""", """Translation""", """TranslationVariableLanguages""", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
79
0
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
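# Hedged hardening sketch (same Giphy search endpoint as above, not part of the
# original script): raising on a bad HTTP status avoids a KeyError when the
# response carries no "data" field.
# response = requests.get(url, timeout=10)
# response.raise_for_status()
# gifs = response.json().get("data", [])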
718
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : str ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[Any] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Dict = hint.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Any = self.movq.config.latent_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint} SCREAMING_SNAKE_CASE : Dict = self.unet( sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.split(latents.shape[1] , 
dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0] # post-processing SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Optional[int] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
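# Worked check of downscale_height_and_width above with the pipeline's default
# movq scale factor of 8: 768 // 8**2 == 12 with no remainder, so
# downscale_height_and_width(768, 768, 8) returns (96, 96) -- the latent grid
# size for a 768x768 sample.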
79
0
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCAmelCase = logging.getLogger(__name__) class UpperCamelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : str=None ): '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = None def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually SCREAMING_SNAKE_CASE : Optional[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port SCREAMING_SNAKE_CASE : Any = str(distributed_port + 1 ) SCREAMING_SNAKE_CASE : Tuple = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : str=torch.floataa ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) return target_tensor def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = psutil.net_if_addrs() # a hacky way to deal with varying network interface names SCREAMING_SNAKE_CASE : str = next((addr for addr in addrs if addr.startswith("""e""" )) , SCREAMING_SNAKE_CASE_ ) return ifname def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any , lowerCamelCase_ : int ): '''simple docstring''' if not dist.is_initialized(): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ ) # distributed training SCREAMING_SNAKE_CASE : str = dist.get_world_size(group=self.process_group ) # gather logic SCREAMING_SNAKE_CASE : List[Any] = None if self._is_main(): SCREAMING_SNAKE_CASE : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )] dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) # scatter logic SCREAMING_SNAKE_CASE : Union[str, Any] = question_hidden_states.shape[0] SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : List[Any] = [] if self._is_main(): assert len(SCREAMING_SNAKE_CASE_ ) == world_size 
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE : Any = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE : str = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE : str = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa ) SCREAMING_SNAKE_CASE : Tuple = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
719
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __UpperCAmelCase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Tuple = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) return (out_vocab_file,)
79
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( _snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case_ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=10_00 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=snake_case_ , ) SCREAMING_SNAKE_CASE : int = AutoencoderKL() SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler() SCREAMING_SNAKE_CASE : Dict = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str]=0 ): '''simple docstring''' if str(snake_case_ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(snake_case_ ) else: SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) SCREAMING_SNAKE_CASE : List[Any] = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = "cpu" SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(snake_case_ ) SCREAMING_SNAKE_CASE : List[str] = pipe(**snake_case_ ).images SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) SCREAMING_SNAKE_CASE : List[str] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) SCREAMING_SNAKE_CASE : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case_ , 1e-3 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=snake_case_ , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): """simple 
docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = ["vase", "umbrella", "white shark", "white wolf"] SCREAMING_SNAKE_CASE : Optional[Any] = pipe.get_label_ids(snake_case_ ) SCREAMING_SNAKE_CASE : Tuple = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(snake_case_ , snake_case_ ): SCREAMING_SNAKE_CASE : str = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) SCREAMING_SNAKE_CASE : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE : str = ["vase", "umbrella"] SCREAMING_SNAKE_CASE : Any = pipe.get_label_ids(snake_case_ ) SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(snake_case_ , snake_case_ ): SCREAMING_SNAKE_CASE : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
720
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
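# Numeric note on the boundary values the assertions above rely on: in float32,
# sigmoid(20) rounds to exactly 1, so SiLU(20) == 20; sigmoid(-100) evaluates
# to exactly 0 (exp(100) overflows float32), so SiLU(-100) == 0; and
# SiLU(-1) = -sigmoid(-1) ~= -0.2689 is the deliberately nonzero case.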
79
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
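# Design note on the lazy structure above: with _LazyModule installed in
# sys.modules, a statement such as `from <package> import SqueezeBertModel`
# only materializes the heavy modeling submodule on first attribute access,
# keeping top-level package import cheap.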
721
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...onnx import OnnxConfig

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
79
0
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON configuration
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
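# Illustrative invocation (the script filename and all paths below are
# placeholders, not taken from the source):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin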
700
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
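# Minimal usage sketch for the first (directed) graph class above. The class
# name is obfuscated in this dump, so `Graph` below is a stand-in; add_pair,
# dfs and bfs are the retained method names, and the traversal order shown
# follows from the insertion order:
# g = Graph()
# g.add_pair(1, 2)
# g.add_pair(2, 3)
# g.dfs(1, 3)  # -> [1, 2, 3]
# g.bfs(1)     # -> [1, 2, 3]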
79
0
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductance, frequency or inductive reactance from the other two,
    using X_L = 2 * pi * f * L. Exactly one of the three arguments must be 0.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
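# Worked check of the formula above: with L = 0.035 H and f = 1000 Hz,
# X_L = 2 * pi * 1000 * 0.035 ~= 219.91 ohms, so
# ind_reactance(0.035, 1000, 0) == {"reactance": 219.91148575128552}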
701
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
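# Format note derived from the first special-tokens builder above (method names
# are obfuscated in this dump; upstream it is build_inputs_with_special_tokens):
# a single sequence becomes <s> A </s>, while a pair becomes
# <s> A </s></s> B </s> (RoBERTa-style double separator), and the token-type
# method returns zeros for both segments.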
79
0
'''simple docstring''' import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def __A ( lowerCamelCase_ ): """simple docstring""" return (data["data"], data["target"]) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = XGBClassifier() classifier.fit(UpperCamelCase__ , UpperCamelCase__ ) return classifier def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = load_iris() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = data_handling(UpperCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = train_test_split( UpperCamelCase__ , UpperCamelCase__ , test_size=0.25 ) SCREAMING_SNAKE_CASE : Tuple = iris["""target_names"""] # Create an XGBoost Classifier from the training data SCREAMING_SNAKE_CASE : Union[str, Any] = xgboost(UpperCamelCase__ , UpperCamelCase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , display_labels=UpperCamelCase__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
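# A readable equivalent of the pipeline above, using only the public
# scikit-learn and xgboost APIs the snippet imports; the accuracy print is a
# stand-in for the confusion-matrix plot.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

def iris_xgboost_demo() -> None:
    data = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        data["data"], data["target"], test_size=0.25
    )
    classifier = XGBClassifier()
    classifier.fit(x_train, y_train)
    print(f"test accuracy: {classifier.score(x_test, y_test):.3f}")

iris_xgboost_demo()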
702
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) SCREAMING_SNAKE_CASE : Dict = { """input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 6, 7_68) ) self.assertEqual(output.shape , lowerCamelCase_ ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
79
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( snake_case__ , snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''maskformer-swin''' SCREAMING_SNAKE_CASE__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict , lowerCamelCase_ : int=2_24 , lowerCamelCase_ : str=4 , lowerCamelCase_ : int=3 , lowerCamelCase_ : str=96 , lowerCamelCase_ : Any=[2, 2, 6, 2] , lowerCamelCase_ : Any=[3, 6, 12, 24] , lowerCamelCase_ : List[Any]=7 , lowerCamelCase_ : Union[str, Any]=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : Optional[Any]=1e-5 , lowerCamelCase_ : Any=None , lowerCamelCase_ : int=None , **lowerCamelCase_ : Any , ): '''simple docstring''' super().__init__(**lowercase_ ) SCREAMING_SNAKE_CASE : List[str] = image_size SCREAMING_SNAKE_CASE : Any = patch_size SCREAMING_SNAKE_CASE : List[Any] = num_channels SCREAMING_SNAKE_CASE : str = embed_dim SCREAMING_SNAKE_CASE : Union[str, Any] = depths SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads SCREAMING_SNAKE_CASE : Union[str, Any] = window_size SCREAMING_SNAKE_CASE : str = mlp_ratio SCREAMING_SNAKE_CASE : List[str] = qkv_bias SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : int = drop_path_rate SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : int = use_absolute_embeddings SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : List[Any] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) SCREAMING_SNAKE_CASE : Any = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )] SCREAMING_SNAKE_CASE : Tuple = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
703
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''Translation''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None def __call__( self : Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages ) if self.languages and set(lowerCamelCase_ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE : List[Any] = [] for lang, text in translation_dict.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
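# Standalone sketch of the flattening step above: multi-translation entries
# are split into parallel (language, text) pairs sorted by language code.
def flatten_translations(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, t) for t in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

# -> {'language': ['en', 'fr', 'fr'],
#     'translation': ['the cat', 'la chatte', 'le chat']}
print(flatten_translations({"fr": ["le chat", "la chatte"], "en": "the cat"}))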
79
0
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" if not grid or not grid[0]: raise TypeError("""The grid does not contain the appropriate information""" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] SCREAMING_SNAKE_CASE : int = grid[0] for row_n in range(1 , len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[str] = grid[row_n] SCREAMING_SNAKE_CASE : int = fill_row(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = grid[row_n] return grid[-1][-1] def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" current_row[0] += row_above[0] for cell_n in range(1 , len(lowerCamelCase_ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
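# The same dynamic program with descriptive names: each cell is overwritten
# with the cheapest cost of reaching it from the top-left, moving only right
# or down, so the answer ends up in the bottom-right cell.
def min_path_sum(grid):
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for col in range(1, len(grid[0])):
        grid[0][col] += grid[0][col - 1]
    for row in range(1, len(grid)):
        grid[row][0] += grid[row - 1][0]
        for col in range(1, len(grid[0])):
            grid[row][col] += min(grid[row][col - 1], grid[row - 1][col])
    return grid[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7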
704
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( FlavaImageProcessor ): """simple docstring""" def __init__( self : Dict , *args : List[str] , **kwargs : Dict ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , FutureWarning , ) super().__init__(*args , **kwargs )
79
0
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def __A ( lowerCamelCase_ ): """simple docstring""" warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , _lowerCamelCase , ) if isinstance(_lowerCamelCase , torch.Tensor ): return image elif isinstance(_lowerCamelCase , PIL.Image.Image ): SCREAMING_SNAKE_CASE : int = [image] if isinstance(image[0] , PIL.Image.Image ): SCREAMING_SNAKE_CASE : Union[str, Any] = image[0].size SCREAMING_SNAKE_CASE : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 SCREAMING_SNAKE_CASE : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] SCREAMING_SNAKE_CASE : Dict = np.concatenate(_lowerCamelCase , axis=0 ) SCREAMING_SNAKE_CASE : Any = np.array(_lowerCamelCase ).astype(np.floataa ) / 2_55.0 SCREAMING_SNAKE_CASE : Union[str, Any] = image.transpose(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE : Optional[int] = 2.0 * image - 1.0 SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(_lowerCamelCase ) elif isinstance(image[0] , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = torch.cat(_lowerCamelCase , dim=0 ) return image def __A ( lowerCamelCase_ ): """simple docstring""" if isinstance(_lowerCamelCase , torch.Tensor ): return mask elif isinstance(_lowerCamelCase , PIL.Image.Image ): SCREAMING_SNAKE_CASE : Any = [mask] if isinstance(mask[0] , PIL.Image.Image ): SCREAMING_SNAKE_CASE : Optional[int] = mask[0].size SCREAMING_SNAKE_CASE : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 SCREAMING_SNAKE_CASE : List[str] = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] SCREAMING_SNAKE_CASE : Dict = np.concatenate(_lowerCamelCase , axis=0 ) SCREAMING_SNAKE_CASE : Any = mask.astype(np.floataa ) / 2_55.0 SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(_lowerCamelCase ) elif isinstance(mask[0] , torch.Tensor ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(_lowerCamelCase , dim=0 ) return mask class UpperCamelCase__ ( __lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : List[str] ): '''simple docstring''' super().__init__() self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , lowerCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase_ : int = 2_50 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : int = 10 , lowerCamelCase_ : int = 10 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = image SCREAMING_SNAKE_CASE : Optional[Any] = _preprocess_image(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = original_image.to(device=self.device , 
dtype=self.unet.dtype ) SCREAMING_SNAKE_CASE : Dict = _preprocess_mask(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = mask_image.to(device=self.device , dtype=self.unet.dtype ) SCREAMING_SNAKE_CASE : str = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) SCREAMING_SNAKE_CASE : List[str] = original_image.shape SCREAMING_SNAKE_CASE : List[Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.device ) SCREAMING_SNAKE_CASE : Tuple = eta SCREAMING_SNAKE_CASE : List[str] = self.scheduler.timesteps[0] + 1 SCREAMING_SNAKE_CASE : str = generator[0] if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t SCREAMING_SNAKE_CASE : List[str] = self.scheduler.undo_step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = t SCREAMING_SNAKE_CASE : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
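# Hedged usage sketch of the inpainting call above. The checkpoint id is
# illustrative; the keyword names follow diffusers' RePaintPipeline, which
# the obfuscated __call__ signature (250 steps, eta, two jump parameters)
# appears to match.
import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
original = PIL.Image.open("face.png")  # image whose masked region is regenerated
mask = PIL.Image.open("mask.png")      # thresholded at 0.5 by _preprocess_mask
result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
).images[0]
result.save("inpainted.png")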
705
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return default_hp_space_wandb(lowerCamelCase_ ) __UpperCAmelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name if len(lowerCamelCase_ ) > 1: logger.info( f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
79
0
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __A ( lowerCamelCase_ ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __UpperCAmelCase = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : ArgumentParser ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parser.add_parser( """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , ) train_parser.add_argument("""--model_type""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""Model\'s type.""" ) train_parser.add_argument( """--tf_checkpoint""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" , type=__UpperCamelCase , default="""""" , help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , ) train_parser.set_defaults(func=__UpperCamelCase ) def __init__( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , *lowerCamelCase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(f'''Loading model {model_type}''' ) SCREAMING_SNAKE_CASE : Tuple = model_type SCREAMING_SNAKE_CASE : Any = tf_checkpoint SCREAMING_SNAKE_CASE : Any = pytorch_dump_output SCREAMING_SNAKE_CASE : Optional[int] = config SCREAMING_SNAKE_CASE : List[Any] = finetuning_task_name def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , 
self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): SCREAMING_SNAKE_CASE : str = self._tf_checkpoint SCREAMING_SNAKE_CASE : List[str] = """""" else: SCREAMING_SNAKE_CASE : Any = self._tf_checkpoint SCREAMING_SNAKE_CASE : Union[str, Any] = """""" convert_transfo_xl_checkpoint_to_pytorch( __UpperCamelCase , self._config , self._pytorch_dump_output , __UpperCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
706
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
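# The geometry above reduces to one rule: flipping a YOLO-format box
# (label, x_center, y_center, width, height) only reflects its centre.
def flip_bbox(bbox, flip_type):
    label, x_center, y_center, width, height = bbox
    if flip_type == 1:  # horizontal flip mirrors x
        x_center = 1 - x_center
    elif flip_type == 0:  # vertical flip mirrors y
        y_center = 1 - y_center
    return [label, x_center, y_center, width, height]

assert flip_bbox([0, 0.25, 0.5, 0.1, 0.2], 1) == [0, 0.75, 0.5, 0.1, 0.2]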
79
0
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( A_ , A_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , *, lowerCamelCase_ : Union[str, Any] = 4 , lowerCamelCase_ : Optional[Any] = 7_68 , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.zeros(lowerCamelCase_ ) ) # parameters for additional clip time embeddings SCREAMING_SNAKE_CASE : Tuple = nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) # parameters for encoder hidden states SCREAMING_SNAKE_CASE : Any = clip_extra_context_tokens SCREAMING_SNAKE_CASE : List[str] = nn.Linear( lowerCamelCase_ , self.clip_extra_context_tokens * cross_attention_dim ) SCREAMING_SNAKE_CASE : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = nn.LayerNorm(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , *, lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings SCREAMING_SNAKE_CASE : Tuple = image_embeddings.shape[0] SCREAMING_SNAKE_CASE : Tuple = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) SCREAMING_SNAKE_CASE : int = classifier_free_guidance_embeddings.expand( lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] SCREAMING_SNAKE_CASE : Dict = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... SCREAMING_SNAKE_CASE : Union[str, Any] = self.embedding_proj(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" SCREAMING_SNAKE_CASE : List[Any] = self.clip_extra_context_tokens_proj(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = clip_extra_context_tokens.reshape(lowerCamelCase_ , -1 , self.clip_extra_context_tokens ) SCREAMING_SNAKE_CASE : Dict = clip_extra_context_tokens.permute(0 , 2 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = self.encoder_hidden_states_proj(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.text_encoder_hidden_states_norm(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
707
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias super().__init__(**lowerCamelCase_ )
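# Assuming this maps to transformers' VivitConfig, the defaults visible in
# the signature above can be checked directly (the class name and import are
# assumptions, not stated in the snippet):
from transformers import VivitConfig

config = VivitConfig()
assert config.num_frames == 32
assert config.tubelet_size == [2, 16, 16]
assert config.hidden_act == "gelu_fast"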
79
0
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __UpperCAmelCase = datasets.logging.get_logger(__name__) __UpperCAmelCase = """\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n""" __UpperCAmelCase = """\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n""" __UpperCAmelCase = """\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n""" __UpperCAmelCase = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').""" ) SCREAMING_SNAKE_CASE : List[Any] = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: SCREAMING_SNAKE_CASE : Tuple = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: SCREAMING_SNAKE_CASE : Any = self.config_name.upper() else: raise KeyError( f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer SCREAMING_SNAKE_CASE : List[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) SCREAMING_SNAKE_CASE : int = score.BleurtScorer(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.scorer.score(references=UpperCamelCase__ , candidates=UpperCamelCase__ ) return {"scores": scores}
708
'''simple docstring''' import math class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE : Any = n SCREAMING_SNAKE_CASE : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = w def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": __UpperCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
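# The same all-pairs relaxation as a standalone function over an adjacency
# matrix, with math.inf marking absent edges exactly as in the class above.
import math

def floyd_warshall(weights):
    n = len(weights)
    dist = [row[:] for row in weights]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

INF = math.inf
# 1 -> 2 costs 2 and 2 -> 0 costs 1, so the cheapest 1 -> 0 path costs 3.
assert floyd_warshall([[0, 9, INF], [INF, 0, 2], [1, INF, 0]])[1][0] == 3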
79
0
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
709
'''simple docstring''' import math def __A ( lowerCamelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __A ( lowerCamelCase_ = 1_00_01 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE : list[int] = [] SCREAMING_SNAKE_CASE : Dict = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
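# The 6k +/- 1 trial division above in compact form, plus a sanity check:
# every prime greater than 3 is congruent to 1 or 5 modulo 6.
import math

def nth_prime(nth: int) -> int:
    def is_prime(number: int) -> bool:
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        return all(
            number % i and number % (i + 2)
            for i in range(5, int(math.sqrt(number)) + 1, 6)
        )

    primes, candidate = [], 2
    while len(primes) < nth:
        if is_prime(candidate):
            primes.append(candidate)
        candidate += 1
    return primes[-1]

assert nth_prime(6) == 13  # 2, 3, 5, 7, 11, 13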
79
0
import pprint import requests __UpperCAmelCase = """https://zenquotes.io/api""" def __A ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __A ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __UpperCAmelCase = random_quotes() pprint.pprint(response)
710
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
0
'''simple docstring''' import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def __A ( lowerCamelCase_=None ): """simple docstring""" if subparsers is not None: SCREAMING_SNAKE_CASE : int = subparsers.add_parser("""env""" ) else: SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser("""Accelerate env command""" ) parser.add_argument( """--config_file""" , default=__lowerCAmelCase , help="""The config file to use for the default values in the launching script.""" ) if subparsers is not None: parser.set_defaults(func=__lowerCAmelCase ) return parser def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = torch.__version__ SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.is_available() SCREAMING_SNAKE_CASE : str = is_xpu_available() SCREAMING_SNAKE_CASE : int = is_npu_available() SCREAMING_SNAKE_CASE : List[Any] = """Not found""" # Get the default from the config file. if args.config_file is not None or os.path.isfile(__lowerCAmelCase ): SCREAMING_SNAKE_CASE : int = load_config_from_file(args.config_file ).to_dict() SCREAMING_SNAKE_CASE : Union[str, Any] = { """`Accelerate` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Numpy version""": np.__version__, """PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''', """PyTorch XPU available""": str(__lowerCAmelCase ), """PyTorch NPU available""": str(__lowerCAmelCase ), """System RAM""": f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''', } if pt_cuda_available: SCREAMING_SNAKE_CASE : List[str] = torch.cuda.get_device_name() print("""\nCopy-and-paste the text below in your GitHub issue\n""" ) print("""\n""".join([f'''- {prop}: {val}''' for prop, val in info.items()] ) ) print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" ) SCREAMING_SNAKE_CASE : List[Any] = ( """\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else f'''\t{accelerate_config}''' ) print(__lowerCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = accelerate_config return info def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = env_command_parser() SCREAMING_SNAKE_CASE : Any = parser.parse_args() env_command(__lowerCAmelCase ) return 0 if __name__ == "__main__": raise SystemExit(main())
711
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """Hello world! cécé herlolip""" __UpperCAmelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage ) SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) __UpperCAmelCase = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
79
0
'''simple docstring''' import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( """kwargs, expected""" , [ ({"""num_shards""": 0, """max_num_jobs""": 1}, []), ({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]), ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(i , i + 1 ) for i in range(10 )]), ({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]), ({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def test_distribute_shards ( kwargs , expected ): """simple docstring""" out = _distribute_shards(**kwargs ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, max_num_jobs, expected""" , [ ({"""foo""": 0}, 10, [{"""foo""": 0}]), ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]), ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]), ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]), ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]), ] , ) def test_split_gen_kwargs ( gen_kwargs , max_num_jobs , expected ): """simple docstring""" out = _split_gen_kwargs(gen_kwargs , max_num_jobs ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, expected""" , [ ({"""foo""": 0}, 1), ({"""shards""": [0]}, 1), ({"""shards""": [0, 1, 2, 3]}, 4), ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4), ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4), ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError), ] , ) def test_number_of_shards_in_gen_kwargs ( gen_kwargs , expected ): """simple docstring""" if expected is RuntimeError: with pytest.raises(expected ): _number_of_shards_in_gen_kwargs(gen_kwargs ) else: out = _number_of_shards_in_gen_kwargs(gen_kwargs ) assert out == expected
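# A plausible reference implementation consistent with the expectations
# above; this is inferred from the test cases, not the datasets internals.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    shard_ranges, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)  # earlier jobs absorb the remainder
        shard_ranges.append(range(start, start + size))
        start += size
    return shard_ranges

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(0, 1) == []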
712
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
79
0
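The token-level F1 in the sample above follows the SQuAD convention: normalize both strings, count overlapping tokens as a multiset, then take the harmonic mean of precision and recall. A minimal de-obfuscated sketch of the same computation (the identifier names here are illustrative, not the ones used in the sample):

from collections import Counter

def token_f1(prediction: str, gold: str) -> float:
    # Assumes both strings are already normalized (lowercased, punctuation
    # and articles stripped), as normalize_answer does in the sample above.
    pred_tokens = prediction.split()
    gold_tokens = gold.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)  # multiset overlap
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

# e.g. token_f1("the cat sat", "cat sat down") -> 2 * (2/3) * (2/3) / (4/3) = 2/3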
'''simple docstring''' def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [False] * len(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Any = [] queue.append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = True while queue: SCREAMING_SNAKE_CASE : int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Optional[Any] = u return visited[t] def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [-1] * (len(UpperCAmelCase__ )) SCREAMING_SNAKE_CASE : int = 0 while bfs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Optional[Any] = float("""Inf""" ) SCREAMING_SNAKE_CASE : Optional[Any] = sink while s != source: # Find the minimum value in select path SCREAMING_SNAKE_CASE : Any = min(UpperCAmelCase__ , graph[parent[s]][s] ) SCREAMING_SNAKE_CASE : List[str] = parent[s] max_flow += path_flow SCREAMING_SNAKE_CASE : List[str] = sink while v != source: SCREAMING_SNAKE_CASE : int = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow SCREAMING_SNAKE_CASE : List[Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
713
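The max-flow sample above is the BFS-based Edmonds-Karp variant of Ford-Fulkerson: each iteration finds a shortest augmenting path, pushes the bottleneck capacity along it, and updates the residual graph in both directions. The hard-coded 6-node network is the classic textbook example, whose maximum flow should be 23. A cross-check against a reference implementation (this assumes networkx is installed; it is not used by the sample itself):

import networkx as nx

capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
G = nx.DiGraph()
for u, row in enumerate(capacity):
    for v, c in enumerate(row):
        if c > 0:
            G.add_edge(u, v, capacity=c)
flow_value, _ = nx.maximum_flow(G, 0, 5)
print(flow_value)  # expected: 23 for this classic network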
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : List[str] = number while duplicate > 0: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = divmod(lowerCamelCase_ , 10 ) fact_sum += factorial(lowerCamelCase_ ) return fact_sum == number if __name__ == "__main__": print("""Program to check whether a number is a Krisnamurthy Number or not.""") __UpperCAmelCase = int(input("""Enter number: """).strip()) print( f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.''' )
79
0
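A Krishnamurthy number equals the sum of the factorials of its decimal digits; the divmod loop in the sample above peels digits off one at a time. Worked example: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 qualifies, while 146 -> 1 + 24 + 720 = 745 does not. A compact equivalent sketch using math.factorial:

import math

def is_krishnamurthy(n: int) -> bool:
    # Sum of factorials of the decimal digits equals the number itself.
    return n == sum(math.factorial(int(d)) for d in str(n))

assert is_krishnamurthy(145)      # 1! + 4! + 5! == 145
assert not is_krishnamurthy(146)
assert is_krishnamurthy(40585)    # the largest base-10 example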
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str]=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = np.random.RandomState(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : List[str] = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : Optional[int] = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : Any = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations SCREAMING_SNAKE_CASE : str = pipe(**self.get_dummy_inputs() ) SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : Any = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : Optional[int] = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : str = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE : Any = pipe(**__UpperCamelCase ).images SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) SCREAMING_SNAKE_CASE : str = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE : Any = False return options def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) 
pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = """A fantasy landscape, trending on artstation""" SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE : int = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="""np""" , ) SCREAMING_SNAKE_CASE : int = output.images SCREAMING_SNAKE_CASE : Any = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) SCREAMING_SNAKE_CASE : int = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) SCREAMING_SNAKE_CASE : str = init_image.resize((7_68, 5_12) ) SCREAMING_SNAKE_CASE : Optional[int] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = """A fantasy landscape, trending on artstation""" SCREAMING_SNAKE_CASE : str = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="""np""" , ) SCREAMING_SNAKE_CASE : Any = output.images SCREAMING_SNAKE_CASE : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) SCREAMING_SNAKE_CASE : Tuple = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
714
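All of the scheduler tests above share one assertion pattern: render at a tiny 128x128 resolution, take the bottom-right 3x3 corner of the last channel, and compare it element-wise against a hard-coded reference slice with a loose tolerance (1e-1 for the tiny random checkpoint, 2e-2 for the nightly full-model runs, to absorb onnxruntime nondeterminism). A minimal sketch of that check in isolation (the array values here are illustrative):

import numpy as np

def assert_close_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float) -> None:
    # image has shape (batch, height, width, channels); compare the
    # bottom-right corner of the last channel against a reference slice.
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected_slice).max() < atol

fake_image = np.zeros((1, 128, 128, 3), dtype=np.float32)
assert_close_slice(fake_image, np.zeros(9, dtype=np.float32), atol=1e-1)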
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) elif isinstance(lowerCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ ) return self.recursive_tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ ) for column_name in batch: SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] ) return batch
79
0
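The formatter above walks arbitrarily nested rows (dicts, lists, numpy arrays, PIL images), tensorizes the leaves with int64/float32 defaults, and stacks equal-shaped tensors back into one batch tensor. A rough sketch of that recursion with plain torch (simplified: no PIL handling and no map_nested, so this is an illustration of the idea rather than the formatter's actual code path):

import numpy as np
import torch

def tensorize(value):
    # Strings, bytes and None pass through untouched; numbers and arrays
    # become tensors with the same dtype defaults the formatter uses.
    if isinstance(value, (str, bytes, type(None))):
        return value
    if isinstance(value, dict):
        return {k: tensorize(v) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        parts = [tensorize(v) for v in value]
        if parts and all(isinstance(p, torch.Tensor) and p.shape == parts[0].shape for p in parts):
            return torch.stack(parts)  # consolidate equal-shaped leaves
        return parts
    arr = np.asarray(value)
    if np.issubdtype(arr.dtype, np.integer):
        return torch.tensor(arr, dtype=torch.int64)
    if np.issubdtype(arr.dtype, np.floating):
        return torch.tensor(arr, dtype=torch.float32)
    return torch.tensor(arr)

row = {"ids": [1, 2, 3], "score": 0.5, "name": "a"}
print(tensorize(row))  # {'ids': tensor([1, 2, 3]), 'score': tensor(0.5000), 'name': 'a'}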
'''simple docstring''' class UpperCamelCase__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : int = "" , lowerCamelCase_ : str = False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} # A node will be a leaf if the tree contains its word SCREAMING_SNAKE_CASE : Optional[Any] = is_leaf SCREAMING_SNAKE_CASE : Tuple = prefix def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = 0 for q, w in zip(self.prefix , lowerCamelCase_ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] ): '''simple docstring''' for word in words: self.insert(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' if self.prefix == word: SCREAMING_SNAKE_CASE : Any = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: SCREAMING_SNAKE_CASE : List[Any] = RadixNode(prefix=lowerCamelCase_ , is_leaf=lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.nodes[word[0]] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = incoming_node.match( lowerCamelCase_ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCamelCase_ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: SCREAMING_SNAKE_CASE : int = remaining_prefix SCREAMING_SNAKE_CASE : Optional[int] = self.nodes[matching_string[0]] SCREAMING_SNAKE_CASE : Union[str, Any] = RadixNode(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = aux_node if remaining_word == "": SCREAMING_SNAKE_CASE : Optional[Any] = True else: self.nodes[matching_string[0]].insert(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.nodes.get(word[0] , lowerCamelCase_ ) if not incoming_node: return False else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = incoming_node.match( lowerCamelCase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.nodes.get(word[0] , lowerCamelCase_ ) if not incoming_node: return False else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = incoming_node.match( lowerCamelCase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCamelCase_ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current 
node with its only child if len(self.nodes ) == 1 and not self.is_leaf: SCREAMING_SNAKE_CASE : Optional[Any] = list(self.nodes.values() )[0] SCREAMING_SNAKE_CASE : Dict = merging_node.is_leaf self.prefix += merging_node.prefix SCREAMING_SNAKE_CASE : Tuple = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: SCREAMING_SNAKE_CASE : List[Any] = False # If there is 1 edge, we merge it with its child else: SCREAMING_SNAKE_CASE : int = list(incoming_node.nodes.values() )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix SCREAMING_SNAKE_CASE : Any = merging_node.nodes return True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple = 0 ): '''simple docstring''' if self.prefix != "": print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = """banana bananas bandana band apple all beast""".split() SCREAMING_SNAKE_CASE : List[str] = RadixNode() root.insert_many(_UpperCamelCase ) assert all(root.find(_UpperCamelCase ) for word in words ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def __A ( ): """simple docstring""" assert test_trie() def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = RadixNode() SCREAMING_SNAKE_CASE : List[Any] = """banana bananas bandanas bandana band apple all beast""".split() root.insert_many(_UpperCamelCase ) print("""Words:""" , _UpperCamelCase ) print("""Tree:""" ) root.print_tree() if __name__ == "__main__": main()
715
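The whole radix trie above hinges on the match method, which splits a node's stored prefix against an incoming word at their first point of divergence and returns (common prefix, leftover prefix, leftover word); the three-way result drives the insert/find/delete cases. Worked example: matching the word "band" against a node whose prefix is "banana" yields ("ban", "ana", "d"), so insertion splits the node at "ban" and hangs "ana" and "d" off it as children. The same computation in isolation:

def split_at_divergence(prefix: str, word: str):
    # Length of the longest common prefix, then the three leftovers.
    x = 0
    for p, w in zip(prefix, word):
        if p != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

assert split_at_divergence("banana", "band") == ("ban", "ana", "d")
assert split_at_divergence("band", "bandana") == ("band", "", "ana")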
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
79
0
def __A ( lowerCamelCase_ = 1_00_00_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 1 SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : List[Any] = {1: 1} for inputa in range(2 , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = 0 SCREAMING_SNAKE_CASE : List[str] = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: SCREAMING_SNAKE_CASE : Optional[int] = (3 * number) + 1 counter += 1 if inputa not in counters: SCREAMING_SNAKE_CASE : Optional[int] = counter if counter > pre_counter: SCREAMING_SNAKE_CASE : Union[str, Any] = inputa SCREAMING_SNAKE_CASE : List[str] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
716
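This is Project Euler problem 14: find the start value below one million with the longest Collatz chain. The dict caches the chain length of every start value seen, so once a walk reaches any previously solved number it stops and adds the cached tail length instead of iterating down to 1. For example, with length 1 cached for the value 1, computing 3 walks 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 (six steps), finds 2 already cached at length 2, and records a chain length of 8, matching the eight terms 3, 10, 5, 16, 8, 4, 2, 1. (For reference, the known answer to the full problem is 837799.) A recursive sketch of the same memoization:

from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    # Memoized chain length, counting n itself and the terminal 1.
    if n == 1:
        return 1
    nxt = n // 2 if n % 2 == 0 else 3 * n + 1
    return 1 + collatz_length(nxt)

assert collatz_length(3) == 8
assert collatz_length(27) == 112  # a famously long chain for a small start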
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
79
0
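The __init__ above defers every heavy import: _LazyModule replaces the package module with a proxy that only imports modeling_mvp and friends when an attribute is first accessed, so importing the package stays cheap even when torch is installed. The same deferred-import idea can be sketched in plain Python with module-level __getattr__ (PEP 562); the structure below is an illustration, not transformers' actual _LazyModule implementation, which also handles TYPE_CHECKING and dir():

# mypkg/__init__.py -- hypothetical package using PEP 562 lazy imports
import importlib

_import_structure = {
    "tokenization_mvp": ["MvpTokenizer"],
    "modeling_mvp": ["MvpModel", "MvpForConditionalGeneration"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule only on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")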
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __UpperCAmelCase = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __UpperCAmelCase = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } __UpperCAmelCase = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } __UpperCAmelCase = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } __UpperCAmelCase = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } __UpperCAmelCase = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } __UpperCAmelCase = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } __UpperCAmelCase = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class UpperCamelCase__ ( __lowerCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ = 
DPRContextEncoderTokenizer class UpperCamelCase__ ( __lowerCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ = DPRQuestionEncoderTokenizer __UpperCAmelCase = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) __UpperCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) __UpperCAmelCase = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__lowerCAmelCase ) class UpperCamelCase__ : """simple docstring""" def __call__( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Union[bool, str] = False , lowerCamelCase_ : Union[bool, str] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[bool] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' if titles is None and texts is None: return super().__call__( lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE : int = titles if texts is None else texts return super().__call__( lowerCamelCase__ , lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = titles if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [titles] SCREAMING_SNAKE_CASE : Optional[int] = texts if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [texts] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : str = questions if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [questions] * n_passages assert len(lowerCamelCase__ ) == len( lowerCamelCase__ ), f'''There should be as many titles than texts but got {len(lowerCamelCase__ )} titles and {len(lowerCamelCase__ )} texts.''' SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase__ , lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids'''] SCREAMING_SNAKE_CASE : Tuple = super().__call__(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ 
, truncation=lowerCamelCase__ )['''input_ids'''] SCREAMING_SNAKE_CASE : str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCamelCase__ , lowerCamelCase__ ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE : int = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE : Optional[int] = attention_mask return self.pad(lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : BatchEncoding , lowerCamelCase_ : DPRReaderOutput , lowerCamelCase_ : int = 16 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 4 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = reader_input['''input_ids'''] SCREAMING_SNAKE_CASE : Union[str, Any] = reader_output[:3] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase__ ) , reverse=lowerCamelCase__ , key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE : int = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE : Tuple = sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase__ , top_spans=lowerCamelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase__ , start_index=lowerCamelCase__ , end_index=lowerCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCamelCase__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : int , lowerCamelCase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] for start_index, start_score in enumerate(lowerCamelCase__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase__ , key=lambda lowerCamelCase_ : x[1] , reverse=lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' SCREAMING_SNAKE_CASE : List[str] = end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= 
prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCamelCase__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCAmelCase ) class UpperCamelCase__ ( __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE__ = DPRReaderTokenizer
717
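The span-selection helper in the reader tokenizer above scores every candidate answer span independently as start_logits[i] + end_logits[j] for j within max_answer_length of i, sorts the candidates by score, and then greedily keeps the top spans while filtering out nested duplicates (the containment check in the sample). A de-obfuscated sketch of that selection step, with illustrative identifier names:

def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # Score every (start, end) pair with end - start + 1 <= max_answer_length.
    scored = [
        ((i, i + k), s + e)
        for i, s in enumerate(start_logits)
        for k, e in enumerate(end_logits[i : i + max_answer_length])
    ]
    scored.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scored:
        # Skip candidates that contain, or are contained in, a chosen span.
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=2, top_spans=2))
# [(1, 2), (0, 0)]: the best span wins, its nested sub-spans are filtered out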
'''simple docstring''' __UpperCAmelCase = [ """Audio""", """Array2D""", """Array3D""", """Array4D""", """Array5D""", """ClassLabel""", """Features""", """Sequence""", """Value""", """Image""", """Translation""", """TranslationVariableLanguages""", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
79
0
'''simple docstring''' def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
718
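In two's complement the sign lives in the top bit, so XOR-ing two integers yields a negative result exactly when their signs differ; Python's arbitrary-precision ints preserve this because negative values behave as if sign-extended indefinitely. Note also that ^ binds tighter than <, so the expression parses as (numa ^ numb) < 0. A short sketch with a few truth cases:

def different_signs(num_a: int, num_b: int) -> bool:
    # Negative XOR result iff exactly one of the inputs is negative.
    return num_a ^ num_b < 0

assert different_signs(1, -1)        # 1 ^ -1 == -2
assert not different_signs(1, 1)     # 1 ^ 1 == 0
assert not different_signs(-5, -3)   # -5 ^ -3 == 6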
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : str ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[Any] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Dict = hint.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Any = self.movq.config.latent_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint} SCREAMING_SNAKE_CASE : Dict = self.unet( sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.split(latents.shape[1] , 
dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0] # post-processing SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Optional[int] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
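# A minimal sketch (not part of the pipeline above) illustrating how the
# downscale_height_and_width helper rounds image sizes onto the latent grid.
# The scale_factor of 8 below is an assumption matching a typical movq with
# four block_out_channels (2 ** (4 - 1) = 8); verify against your model config.
def _demo_downscale(height, width, scale_factor=8):
    # integer-divide by scale_factor**2, rounding up, then re-multiply
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

assert _demo_downscale(768, 768) == (96, 96)    # divides evenly by 64
assert _demo_downscale(770, 770) == (104, 104)  # any remainder rounds up a full step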
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __UpperCAmelCase = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
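# A minimal sketch of the deferred-import pattern that _LazyModule implements
# above: names are resolved on first attribute access instead of at import time.
# The module and attribute names here are hypothetical placeholders, and a real
# _LazyModule imports submodules relative to its package rather than absolutely.
import importlib

_demo_import_structure = {"configuration_example": ["ExampleConfig"]}

def __getattr__(name):  # PEP 562 module-level __getattr__
    for module_name, exported in _demo_import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")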
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __UpperCAmelCase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Tuple = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) return (out_vocab_file,)
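# A minimal sketch (plain Python, no tokenizer required) of the sequence layouts
# built above: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair,
# plus the matching special-tokens mask. The token ids below are made-up integers.
CLS, SEP = 101, 102
a, b = [7, 8, 9], [4, 5]

single = [CLS] + a + [SEP]
pair = [CLS] + a + [SEP] + b + [SEP]

mask_single = [1] + [0] * len(a) + [1]
mask_pair = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]

assert len(mask_single) == len(single)
assert len(mask_pair) == len(pair)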
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : List[str] = padding_side return tokenizer( [line] , max_length=lowerCamelCase__ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(lowerCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict="train" , lowerCamelCase_ : Dict=None , lowerCamelCase_ : str=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Dict="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : List[str] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : str = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : str = tokenizer SCREAMING_SNAKE_CASE : str = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : Tuple = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : Dict = src_lang SCREAMING_SNAKE_CASE : Dict = tgt_lang def __len__( self : Dict ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Any = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Tuple = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_source_length 
, """right""" ) SCREAMING_SNAKE_CASE : Tuple = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Optional[Any] = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : List[str] ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : Any = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : Tuple = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : List[Any] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase__ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = get_git_info() save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase__ , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase__ ) as f: return json.load(lowerCamelCase__ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = git.Repo(search_parent_directories=lowerCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = { """repo_id""": str(lowerCamelCase__ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase__ , lowerCamelCase__ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase__ , """wb""" ) as f: return pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase__ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) ) 
def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = normalize_answer(lowerCamelCase__ ).split() SCREAMING_SNAKE_CASE : Optional[Any] = normalize_answer(lowerCamelCase__ ).split() SCREAMING_SNAKE_CASE : int = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Dict = 1.0 * num_same / len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = 1.0 * num_same / len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE : Dict = 0 for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ): em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: em /= len(lowerCamelCase__ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ): logger.info("""config doesn\'t have a `{}` attribute""".format(lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) continue SCREAMING_SNAKE_CASE : Optional[int] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p] setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) return hparams, config
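# A minimal standalone sketch of the SQuAD-style normalization and token-level
# F1 computed by the helpers above (the names here are local stand-ins, not the
# file's own identifiers).
import re
import string
from collections import Counter

def _normalize(text):
    # lowercase, strip punctuation, drop articles, collapse whitespace
    punct = set(string.punctuation)
    text = "".join(ch for ch in text.lower() if ch not in punct)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def _token_f1(pred, gold):
    p, g = _normalize(pred).split(), _normalize(gold).split()
    same = sum((Counter(p) & Counter(g)).values())
    if same == 0:
        return 0.0
    precision, recall = same / len(p), same / len(g)
    return 2 * precision * recall / (precision + recall)

assert _normalize("The Cat!") == "cat"
assert _token_f1("the cat sat", "a cat sat") == 1.0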
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""swish""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = get_activation("""silu""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""mish""" ) self.assertIsInstance(lowerCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_activation("""gelu""" ) self.assertIsInstance(lowerCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
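# A minimal sketch relating the activations exercised above to their closed
# forms: SiLU/Swish is x * sigmoid(x) and Mish is x * tanh(softplus(x)).
import torch
from torch import nn

x = torch.linspace(-5, 5, steps=11)
assert torch.allclose(nn.SiLU()(x), x * torch.sigmoid(x), atol=1e-6)
assert torch.allclose(nn.Mish()(x), x * torch.tanh(nn.functional.softplus(x)), atol=1e-6)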
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''deit''' def __init__( self : Dict , lowerCamelCase_ : Optional[int]=7_68 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : Union[str, Any]=12 , lowerCamelCase_ : str=30_72 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Optional[Any]=0.0 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Tuple=1e-12 , lowerCamelCase_ : Optional[int]=2_24 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=16 , **lowerCamelCase_ : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : int = layer_norm_eps SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : Dict = patch_size SCREAMING_SNAKE_CASE : List[Any] = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias SCREAMING_SNAKE_CASE : Dict = encoder_stride class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return 1e-4
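# A minimal sketch of the shape arithmetic implied by the defaults above:
# a 224x224 image with 16x16 patches yields a 14x14 grid of 196 patch tokens
# (DeiT additionally prepends class/distillation tokens, not counted here).
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196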
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __A ( lowerCamelCase_="" ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp() return os.path.join(lowerCamelCase_ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = torch.rand(12 , dtype=torch.floataa ) - 0.5 SCREAMING_SNAKE_CASE : Union[str, Any] = AgentAudio(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase_ , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowerCamelCase_ ) ) # Ensure that the file contains the same value as the original tensor SCREAMING_SNAKE_CASE : Union[str, Any] = sf.read(lowerCamelCase_ ) self.assertTrue(torch.allclose(lowerCamelCase_ , torch.tensor(lowerCamelCase_ ) , atol=1e-4 ) ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = torch.rand(12 , dtype=torch.floataa ) - 0.5 SCREAMING_SNAKE_CASE : List[str] = get_new_path(suffix=""".wav""" ) sf.write(lowerCamelCase_ , lowerCamelCase_ , 1_60_00 ) SCREAMING_SNAKE_CASE : Optional[int] = AgentAudio(lowerCamelCase_ ) self.assertTrue(torch.allclose(lowerCamelCase_ , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , lowerCamelCase_ ) @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , 2_56 , (64, 64, 3) ) SCREAMING_SNAKE_CASE : Union[str, Any] = AgentImage(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase_ , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase_ ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" SCREAMING_SNAKE_CASE : Optional[int] = Image.open(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = AgentImage(lowerCamelCase_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase_ ) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" SCREAMING_SNAKE_CASE : List[str] = Image.open(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = 
AgentImage(lowerCamelCase_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase_ ) ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = """Hey!""" SCREAMING_SNAKE_CASE : Tuple = AgentText(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , agent_type.to_string() ) self.assertEqual(lowerCamelCase_ , agent_type.to_raw() ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
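# A minimal sketch of the agent-type contract the tests above exercise: each
# type exposes the same payload both as a raw object (to_raw) and in a string
# form (to_string). This text-only stand-in is a hypothetical illustration,
# not the transformers implementation.
class _DemoAgentText(str):
    def to_raw(self):
        return str(self)

    def to_string(self):
        return str(self)

t = _DemoAgentText("Hey!")
assert t.to_raw() == t.to_string() == "Hey!"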
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
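# A minimal standalone adjacency-list sketch of the traversals implemented
# above (the class above stores edges as [weight, vertex] pairs; this demo
# drops the weights for brevity).
from collections import deque

_demo_graph = {0: [1, 2], 1: [3], 2: [3], 3: []}

def _demo_dfs(start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            stack.extend(reversed(_demo_graph[node]))  # keep left-to-right order
    return visited

def _demo_bfs(start):
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for nxt in _demo_graph[node]:
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    return visited

assert _demo_dfs(0) == [0, 1, 3, 2]
assert _demo_bfs(0) == [0, 1, 2, 3]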
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (DEISMultistepScheduler,) SCREAMING_SNAKE_CASE__ = (('''num_inference_steps''', 25),) def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 10_00, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=0 , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""num_inference_steps""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.dummy_sample SCREAMING_SNAKE_CASE : Any = 0.1 * sample SCREAMING_SNAKE_CASE : str = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class.from_pretrained(lowerCamelCase_ ) new_scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order] SCREAMING_SNAKE_CASE : str = sample, sample for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ): SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[int] = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any]=0 , **lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("""num_inference_steps""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : str = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE : List[str] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler_class.from_pretrained(lowerCamelCase_ ) # copy over dummy past residuals 
new_scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residual (must be after setting timesteps) SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' if scheduler is None: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : str = self.get_scheduler_config(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = 10 SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample return sample def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : int = kwargs.pop("""num_inference_steps""" , lowerCamelCase_ ) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : str = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase_ , """set_timesteps""" ): scheduler.set_timesteps(lowerCamelCase_ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , """set_timesteps""" ): SCREAMING_SNAKE_CASE : str = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order] SCREAMING_SNAKE_CASE : int = scheduler.timesteps[5] SCREAMING_SNAKE_CASE : int = scheduler.timesteps[6] SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = DEISMultistepScheduler(**self.get_scheduler_config() ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.23_916 ) < 1e-3 SCREAMING_SNAKE_CASE : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : Tuple = 
DPMSolverMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : Dict = UniPCMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : int = DEISMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.23_916 ) < 1e-3 def lowerCamelCase_ ( self : str ): '''simple docstring''' for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.check_over_configs(thresholding=lowerCamelCase_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type="""deis""" , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = self.full_loop( solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , ) assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers" def lowerCamelCase_ ( self : int ): '''simple docstring''' self.check_over_configs(lower_order_final=lowerCamelCase_ ) self.check_over_configs(lower_order_final=lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.full_loop() SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.23_916 ) < 1e-3 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.full_loop(prediction_type="""v_prediction""" ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.091 ) < 1e-3 def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 ) SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = 10 SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample assert sample.dtype == torch.floataa
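# A minimal sketch of the config round-trip the tests above rely on: the
# multistep schedulers in diffusers share a config schema, so one can be
# rebuilt from another's config via from_config. Assumes diffusers is installed.
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000)
unipc = UniPCMultistepScheduler.from_config(deis.config)
deis_again = DEISMultistepScheduler.from_config(unipc.config)
assert deis_again.config.num_train_timesteps == 1000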
701
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
79
0
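The scheduler test above relies on diffusers schedulers being interchangeable through their shared config. A minimal sketch of that round trip, assuming a recent diffusers release (the kwarg shown is an illustrative default, not the only option):

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

# Each scheduler can be rebuilt from another's config, which is the round trip
# the test exercises before re-running the sampling loop and comparing means.
deis = DEISMultistepScheduler(num_train_timesteps=1000)
single = DPMSolverSinglestepScheduler.from_config(deis.config)
multi = DPMSolverMultistepScheduler.from_config(single.config)
unipc = UniPCMultistepScheduler.from_config(multi.config)
deis_again = DEISMultistepScheduler.from_config(unipc.config)
assert deis_again.config.num_train_timesteps == deis.config.num_train_timesteps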
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        # Deprecation shim: identical to GLPNImageProcessor, but warns on construction.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
702
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) SCREAMING_SNAKE_CASE : Dict = { """input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 6, 7_68) ) self.assertEqual(output.shape , lowerCamelCase_ ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
79
0
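The GLPN wrapper above is an instance of the standard deprecation-shim pattern: subclass the replacement class and emit a FutureWarning on construction. A self-contained sketch of the pattern (the class names here are hypothetical stand-ins):

import warnings

class NewImageProcessor:  # stand-in for the replacement class (hypothetical)
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    # Deprecation shim: identical behavior, plus a warning at construction time.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)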
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __UpperCAmelCase = 256047 __UpperCAmelCase = 256145 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = NllbTokenizer SCREAMING_SNAKE_CASE__ = NllbTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {} def lowerCamelCase_ ( self : int ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : Union[str, Any] = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = 
tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Dict = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : str = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) @require_torch def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if not self.test_seqaseq: return SCREAMING_SNAKE_CASE : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
SCREAMING_SNAKE_CASE : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] SCREAMING_SNAKE_CASE : Tuple = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified SCREAMING_SNAKE_CASE : List[str] = tokenizer.prepare_seqaseq_batch( lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("""decoder_input_ids""" , lowerCamelCase_ ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : int = [AddedToken("""<special>""" , lstrip=lowerCamelCase_ )] SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode("""Hey this is a <special> token""" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode("""<special>""" , add_special_tokens=lowerCamelCase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.encode("""Hey this is a <special> token""" ) SCREAMING_SNAKE_CASE : Any = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" 
SCREAMING_SNAKE_CASE__ = '''facebook/nllb-200-distilled-600M''' SCREAMING_SNAKE_CASE__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] SCREAMING_SNAKE_CASE__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] SCREAMING_SNAKE_CASE__ = [ 25_6047, 1_6297, 13_4408, 8165, 24_8066, 1_4734, 950, 1135, 10_5721, 3573, 83, 2_7352, 108, 4_9486, 2, ] @classmethod def lowerCamelCase_ ( cls : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" ) SCREAMING_SNAKE_CASE : List[Any] = 1 return cls def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE : Tuple = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on SCREAMING_SNAKE_CASE : Dict = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : int = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = NllbTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ ) @require_torch def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict 
= self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : str = targets["""input_ids"""] SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right( lowerCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[25_60_47, 70, 73_56, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_60_57, } , ) @require_torch def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Dict = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : int = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
703
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''Translation''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None def __call__( self : Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages ) if self.languages and set(lowerCamelCase_ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE : List[Any] = [] for lang, text in translation_dict.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
79
0
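A hedged usage sketch for the Translation feature type defined above, assuming the Hugging Face datasets library, which exports Translation at the top level in recent releases:

from datasets import Dataset, Features, Translation, Value

features = Features({"id": Value("string"), "translation": Translation(languages=["en", "ro"])})
ds = Dataset.from_dict(
    {"id": ["0"], "translation": [{"en": "UN Chief says...", "ro": "Şeful ONU declară..."}]},
    features=features,
)
print(ds[0]["translation"])  # {'en': 'UN Chief says...', 'ro': 'Şeful ONU declară...'}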
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
704
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        # Deprecation shim: identical to FlavaImageProcessor, but warns on construction.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
79
0
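A small sketch of how parameterized aliases like the ones above are consumed; first_leaf is a hypothetical helper written for illustration, not part of the library:

from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]

def first_leaf(data: "NestedDataStructureLike[int]") -> int:
    # Accepts a bare value, a list/tuple of values, or a str-keyed dict of values.
    if isinstance(data, dict):
        return first_leaf(next(iter(data.values())))
    if isinstance(data, (list, tuple)):
        return first_leaf(data[0])
    return data

assert first_leaf(7) == first_leaf([7, 8]) == first_leaf({"a": 7}) == 7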
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
705
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return default_hp_space_wandb(lowerCamelCase_ ) __UpperCAmelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name if len(lowerCamelCase_ ) > 1: logger.info( f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
79
0
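A hedged end-to-end sketch for the zero-shot object detection pipeline above; the checkpoint named here is one commonly used OWL-ViT model, not the only choice:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    # Each prediction carries the fields built in postprocess(): score, label, box.
    print(pred["label"], round(pred["score"], 3), pred["box"])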
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
79
0
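The annotation update in the flip-augmentation script above reduces to mirroring one normalized center coordinate. A self-contained sketch of that rule (YOLO boxes are (class, x_center, y_center, width, height) in [0, 1]):

def flip_bbox(bbox, flip_type=1):
    # flip_type follows cv2.flip: 1 = horizontal (mirror x), 0 = vertical (mirror y).
    cls, x_center, y_center, width, height = bbox
    if flip_type == 1:
        return (cls, 1 - x_center, y_center, width, height)
    return (cls, x_center, 1 - y_center, width, height)

assert flip_bbox((0, 0.25, 0.5, 0.1, 0.2)) == (0, 0.75, 0.5, 0.1, 0.2)
assert flip_bbox((0, 0.25, 0.4, 0.1, 0.2), flip_type=0) == (0, 0.25, 0.6, 0.1, 0.2)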
def gnome_sort(lst: list) -> list:
    """Pure-Python gnome sort: walk forward while ordered, swap and step back otherwise."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
707
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
79
0
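A quick check for gnome_sort from the snippet above (assuming it is importable); like insertion sort it is O(n^2) in the worst case and O(n) on already-sorted input:

assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort([1, 2, 3]) == [1, 2, 3]  # already sorted: single forward pass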
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __A ( lowerCamelCase_ , lowerCamelCase_="shi-labs/oneformer_demo" ): """simple docstring""" with open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE : Any = json.load(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Optional[int] = [] for key, info in class_info.items(): SCREAMING_SNAKE_CASE : str = info["""name"""] class_names.append(info["""name"""] ) if info["isthing"]: thing_ids.append(int(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : List[str] = thing_ids SCREAMING_SNAKE_CASE : List[str] = class_names return metadata class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=7 , lowerCamelCase_ : Dict=3 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=4_00 , lowerCamelCase_ : int=None , lowerCamelCase_ : int=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , lowerCamelCase_ : str=10 , lowerCamelCase_ : Any=False , lowerCamelCase_ : Union[str, Any]=2_55 , lowerCamelCase_ : Optional[Any]="shi-labs/oneformer_demo" , lowerCamelCase_ : Tuple="ade20k_panoptic.json" , lowerCamelCase_ : int=10 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : Any = batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = min_resolution SCREAMING_SNAKE_CASE : Union[str, Any] = max_resolution SCREAMING_SNAKE_CASE : Dict = do_resize SCREAMING_SNAKE_CASE : Tuple = {"""shortest_edge""": 32, """longest_edge""": 13_33} if size is None else size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[Any] = image_mean SCREAMING_SNAKE_CASE : List[Any] = image_std SCREAMING_SNAKE_CASE : List[Any] = class_info_file SCREAMING_SNAKE_CASE : Tuple = prepare_metadata(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = num_text SCREAMING_SNAKE_CASE : int = repo_path # for the post_process_functions SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : Union[str, Any] = 10 SCREAMING_SNAKE_CASE : Tuple = 10 SCREAMING_SNAKE_CASE : List[str] = 3 SCREAMING_SNAKE_CASE : Optional[int] = 4 SCREAMING_SNAKE_CASE : Any = num_labels SCREAMING_SNAKE_CASE : Tuple = do_reduce_labels SCREAMING_SNAKE_CASE : Dict = ignore_index def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": 
self.num_text, } def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : Union[str, Any] = image_inputs[0] if isinstance(lowerCamelCase_ , Image.Image ): SCREAMING_SNAKE_CASE : Dict = image.size else: SCREAMING_SNAKE_CASE : Tuple = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : List[Any] = int(self.size["""shortest_edge"""] * h / w ) SCREAMING_SNAKE_CASE : Optional[Any] = self.size["""shortest_edge"""] elif w > h: SCREAMING_SNAKE_CASE : Optional[Any] = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE : Optional[int] = int(self.size["""shortest_edge"""] * w / h ) else: SCREAMING_SNAKE_CASE : List[str] = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE : List[str] = self.size["""shortest_edge"""] else: SCREAMING_SNAKE_CASE : Optional[int] = [] for image in image_inputs: SCREAMING_SNAKE_CASE : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Any = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[0] )[0] SCREAMING_SNAKE_CASE : Any = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[1] )[1] return expected_height, expected_width def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string SCREAMING_SNAKE_CASE__ = image_processing_class def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = OneFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """ignore_index""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """class_info_file""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """num_text""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """repo_path""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """metadata""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_reduce_labels""" ) ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched 
input SCREAMING_SNAKE_CASE : int = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE : Any = self.image_processing_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = image_processor( lowerCamelCase_ , ["""semantic"""] * len(lowerCamelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Dict = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE : int = self.image_processing_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = image_processor( lowerCamelCase_ , ["""semantic"""] * len(lowerCamelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE : int = self.image_processing_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor( lowerCamelCase_ , ["""semantic"""] * len(lowerCamelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : Tuple="np" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # 
prepare image and target SCREAMING_SNAKE_CASE : Any = self.image_processing_tester.num_labels SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ ) if with_segmentation_maps: SCREAMING_SNAKE_CASE : str = num_labels if is_instance_map: SCREAMING_SNAKE_CASE : Dict = list(range(lowerCamelCase_ ) ) * 2 SCREAMING_SNAKE_CASE : List[Any] = dict(enumerate(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(lowerCamelCase_ ) for annotation in annotations] SCREAMING_SNAKE_CASE : str = image_processor( lowerCamelCase_ , ["""semantic"""] * len(lowerCamelCase_ ) , lowerCamelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=lowerCamelCase_ , pad_and_return_pixel_mask=lowerCamelCase_ , ) return inputs def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' def common(lowerCamelCase_ : List[str]=False , lowerCamelCase_ : int=None ): SCREAMING_SNAKE_CASE : Optional[Any] = self.comm_get_image_processor_inputs( with_segmentation_maps=lowerCamelCase_ , is_instance_map=lowerCamelCase_ , segmentation_type=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = inputs["""mask_labels"""] SCREAMING_SNAKE_CASE : Optional[Any] = inputs["""class_labels"""] SCREAMING_SNAKE_CASE : Tuple = inputs["""pixel_values"""] SCREAMING_SNAKE_CASE : Optional[Any] = inputs["""text_inputs"""] # check the batch_size for mask_label, class_label, text_input in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(lowerCamelCase_ ) , self.image_processing_tester.num_text ) common() common(is_instance_map=lowerCamelCase_ ) common(is_instance_map=lowerCamelCase_ , segmentation_type="""pil""" ) common(is_instance_map=lowerCamelCase_ , segmentation_type="""pil""" ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = np.zeros((20, 50) ) SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : str = 1 SCREAMING_SNAKE_CASE : Tuple = 1 SCREAMING_SNAKE_CASE : Tuple = binary_mask_to_rle(lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE : Optional[Any] = fature_extractor.post_process_semantic_segmentation(lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) SCREAMING_SNAKE_CASE : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] SCREAMING_SNAKE_CASE : Dict = 
image_processor.post_process_semantic_segmentation(lowerCamelCase_ , target_sizes=lowerCamelCase_ ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) SCREAMING_SNAKE_CASE : List[str] = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_instance_segmentation(lowerCamelCase_ , threshold=0 ) self.assertTrue(len(lowerCamelCase_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) SCREAMING_SNAKE_CASE : List[str] = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE : Any = image_processor.post_process_panoptic_segmentation(lowerCamelCase_ , threshold=0 ) self.assertTrue(len(lowerCamelCase_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
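# Editor's note (hedged sketch, not part of the test file above; assumes the
# upstream helper keeps its current behaviour and import path): the RLE checked
# by the binary_mask_to_rle test is a flat list of (start, length) pairs, one
# pair per run of foreground pixels in the flattened mask, so len(rle) == 4
# corresponds to two separate runs of 1s.
import numpy as np
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle

demo_mask = np.zeros((20, 50))
demo_mask[0, 20:] = 1  # a single run of 30 foreground pixels in the first row
print(binary_mask_to_rle(demo_mask))  # expected, if unchanged upstream: [21, 30]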
708
'''simple docstring''' import math class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE : Any = n SCREAMING_SNAKE_CASE : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = w def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": __UpperCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
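# Editor's note (sketch, not part of the original file): show_min returns the
# computed distance instead of printing it, so the two calls above produce no
# visible output; wrap them in print() to inspect the results. Note also that
# dp[i][i] is never initialised to 0, so show_min(u, u) reports math.inf.
print(graph.show_min(1, 4))  # 11, via the path 1 -> 3 -> 4 (5 + 6)
print(graph.show_min(0, 3))  # 16, via the path 0 -> 2 -> 3 (9 + 7)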
79
0
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Any=10 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : int=3 , lowerCamelCase_ : Optional[int]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : int = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : List[Any] = is_training SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : int = type_sequence_label_size SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE : Tuple = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = TFViTModel(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image of a different size than the one specified in config. SCREAMING_SNAKE_CASE : Optional[Any] = self.image_size // 2 SCREAMING_SNAKE_CASE : Dict = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.type_sequence_label_size SCREAMING_SNAKE_CASE : Any = TFViTForImageClassification(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image of a different size than the one specified in config. SCREAMING_SNAKE_CASE : Optional[Any] = self.image_size // 2 SCREAMING_SNAKE_CASE : Any = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : str = TFViTForImageClassification(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = TFViTModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : Tuple ): '''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ) SCREAMING_SNAKE_CASE : str = self.default_image_processor SCREAMING_SNAKE_CASE : int = prepare_img() SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass SCREAMING_SNAKE_CASE : int = model(**lowerCamelCase_ ) # verify the logits SCREAMING_SNAKE_CASE : Any = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
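# Editor's sketch (hedged usage, not part of the test file above): the
# interpolate_pos_encoding flag exercised by the model tests lets ViT accept
# inputs whose resolution differs from config.image_size by resampling the
# learned position embeddings. A minimal illustration, assuming the same
# checkpoint used in the slow test; `images` is a hypothetical input batch:
# model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
# small = tf.image.resize(images, (112, 112))
# outputs = model(small, interpolate_pos_encoding=True)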
709
'''simple docstring''' import math def __A ( lowerCamelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __A ( lowerCamelCase_ = 1_00_01 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE : list[int] = [] SCREAMING_SNAKE_CASE : Dict = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
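# Editor's note (illustrative, not part of the original file): is_prime only
# trial-divides by candidates of the form 6k - 1 and 6k + 1, because every
# prime greater than 3 is congruent to 1 or 5 modulo 6; everything else is
# divisible by 2 or 3 and was already rejected earlier. Quick sanity checks:
# >>> is_prime(29)      # 29 == 6 * 5 - 1
# True
# >>> is_prime(10001)   # 10001 == 73 * 137
# False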
79
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) SCREAMING_SNAKE_CASE : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] SCREAMING_SNAKE_CASE : Any = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : str = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073], """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711], } SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , lowerCamelCase_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , **lowerCamelCase_ : str ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] SCREAMING_SNAKE_CASE : Tuple = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : str = self.get_image_processor() SCREAMING_SNAKE_CASE : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : Optional[int] = 
CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : Any = image_processor(lowerCamelCase_ , return_tensors="""np""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = """lower newer""" SCREAMING_SNAKE_CASE : Dict = processor(text=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = """lower newer""" SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : Dict = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase_ ): processor() def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE : Optional[Any] = processor.batch_decode(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor() SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = """lower newer""" SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : int = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
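# Editor's sketch (hedged, mirrors the behaviour asserted by the tests above
# rather than documenting new API): CLIPProcessor routes text to the tokenizer
# and images to the image processor, then merges both encodings, e.g. with a
# public checkpoint:
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["lower newer"], images=image, return_tensors="pt")
# sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']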
710
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
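# Editor's note (hedged): extract_user_profile above assumes the profile page
# still embeds a JSON blob beginning at '{"config"' inside one of its <script>
# tags, with the scripts[4] / scripts[3] fallback covering markup variations.
# Instagram changes this markup frequently, which is why the verification
# function returns early when the CI environment variable is set.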
79
0