Dataset columns (name: type, observed range):
code: string, length 86 to 54.5k
code_codestyle: int64, 0 to 371
style_context: string, length 87 to 49.2k
style_context_codestyle: int64, 0 to 349
label: int64, 0 to 1
"""simple docstring""" from math import ceil def __SCREAMING_SNAKE_CASE ( A_ = 10_01 ): lowerCAmelCase__ : Union[str, Any] = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowerCAmelCase__ : int = 2 * i + 1 lowerCAmelCase__ : Any = 2 * i lowerCAmelCase__ : Optional[Any] = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: __UpperCamelCase : Optional[Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
code_codestyle: 106
style_context:
"""Directed and undirected (weighted) graphs stored as adjacency lists."""
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u][:]:  # iterate over a copy since we mutate the list
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Undirected version: every edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u][:]:  # iterate over a copy since we mutate the list
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v][:]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
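A minimal usage sketch for the classes above (the edges are made up; expected outputs follow from a hand trace of the methods):

```python
g = DirectedGraph()
g.add_pair(1, 2)
g.add_pair(2, 3)
g.add_pair(3, 1)  # closes the cycle 1 -> 2 -> 3 -> 1
print(g.all_nodes())   # [1, 2, 3]
print(g.dfs())         # [1, 2, 3]
print(g.in_degree(1))  # 1 (only the edge 3 -> 1)
print(g.has_cycle())   # True
```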
style_context_codestyle: 206
label: 0
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _lowerCamelCase : @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.get_dummy_input() @property def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""" ) def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Any=False , ) -> Any: """simple docstring""" lowerCAmelCase__ : Tuple = 4 lowerCAmelCase__ : List[Any] = 32 lowerCAmelCase__ : Optional[Any] = (32, 32) lowerCAmelCase__ : Tuple = torch.manual_seed(0 ) lowerCAmelCase__ : int = torch.device(UpperCamelCase ) lowerCAmelCase__ : List[str] = (batch_size, num_channels) + sizes lowerCAmelCase__ : Dict = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase ) lowerCAmelCase__ : Tuple = {"""hidden_states""": hidden_states} if include_temb: lowerCAmelCase__ : Any = 1_28 lowerCAmelCase__ : Optional[int] = randn_tensor((batch_size, temb_channels) , generator=UpperCamelCase , device=UpperCamelCase ) if include_res_hidden_states_tuple: lowerCAmelCase__ : Optional[Any] = torch.manual_seed(1 ) lowerCAmelCase__ : int = (randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase ),) if include_encoder_hidden_states: lowerCAmelCase__ : Union[str, Any] = floats_tensor((batch_size, 32, 32) ).to(UpperCamelCase ) if include_skip_sample: lowerCAmelCase__ : List[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCamelCase , device=UpperCamelCase ) return dummy_input def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": lowerCAmelCase__ : Tuple = 32 if self.block_type == "mid": init_dict.pop("""out_channels""" ) lowerCAmelCase__ : int = self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self : int , UpperCamelCase : Optional[Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase__ : List[Any] = self.block_class(**UpperCamelCase ) unet_block.to(UpperCamelCase ) unet_block.eval() with torch.no_grad(): lowerCAmelCase__ : str = unet_block(**UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Optional[int] = output[0] self.assertEqual(output.shape , self.output_shape ) lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:] lowerCAmelCase__ : Union[str, Any] = torch.tensor(UpperCamelCase ).to(UpperCamelCase ) assert torch_all_close(output_slice.flatten() , UpperCamelCase , atol=5E-3 ) @unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" ) def _lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = self.block_class(**UpperCamelCase ) 
model.to(UpperCamelCase ) model.train() lowerCAmelCase__ : Union[str, Any] = model(**UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = output[0] lowerCAmelCase__ : Any = torch.device(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = randn_tensor(output.shape , device=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = torch.nn.functional.mse_loss(UpperCamelCase , UpperCamelCase ) loss.backward()
code_codestyle: 366
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A = logging.get_logger(__name__) _A = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } _A = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } _A = {"""facebook/blenderbot-3B""": 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowercase_ ( ) -> Tuple: lowerCAmelCase__ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowerCAmelCase__ : Any = bs[:] lowerCAmelCase__ : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCAmelCase ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ : Dict = [chr(__UpperCAmelCase ) for n in cs] return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) def lowercase_ ( __UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[Any] = set() lowerCAmelCase__ : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : Optional[Any] = char return pairs class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase :Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any="replace" , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : str="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : int="<pad>" , UpperCamelCase : Dict="<mask>" , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Optional[Any] , ) -> Any: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token lowerCAmelCase__ : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token # Mask token 
behave like a normal word, i.e. include the space before it lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : Any = json.load(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ : Dict = errors # how to handle errors in decoding lowerCAmelCase__ : Union[str, Any] = bytes_to_unicode() lowerCAmelCase__ : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : Optional[int] = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) lowerCAmelCase__ : Union[str, Any] = {} lowerCAmelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return len(self.encoder ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str ) -> Union[str, Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCAmelCase__ : Union[str, Any] = tuple(UpperCamelCase ) lowerCAmelCase__ : List[str] = get_pairs(UpperCamelCase ) if not pairs: return token while True: lowerCAmelCase__ : List[str] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : str = bigram lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : List[str] = 0 while i < len(UpperCamelCase ): try: lowerCAmelCase__ : Optional[Any] = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : List[str] = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : List[Any] = tuple(UpperCamelCase ) lowerCAmelCase__ : Tuple = new_word if len(UpperCamelCase ) == 1: break else: lowerCAmelCase__ : Any = get_pairs(UpperCamelCase ) lowerCAmelCase__ : Tuple = """ """.join(UpperCamelCase ) lowerCAmelCase__ : Tuple = word return word def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[str] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Tuple = [] for token in re.findall(self.pat , UpperCamelCase ): lowerCAmelCase__ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode 
strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(""" """ ) ) return bpe_tokens def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, Any] ) -> Dict: """simple docstring""" return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Optional[Any] ) -> Tuple: """simple docstring""" return self.decoder.get(UpperCamelCase ) def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Optional[int] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[str] = """""".join(UpperCamelCase ) lowerCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Union[str, Any] = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : int = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" ) lowerCAmelCase__ : Optional[Any] = 0 with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : Dict = token_index writer.write(""" """.join(UpperCamelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1] def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id] lowerCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Union[str, Any] ) -> str: """simple docstring""" lowerCAmelCase__ : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()): lowerCAmelCase__ : Tuple = """ """ + text return (text, kwargs) def _lowerCAmelCase ( self : Optional[Any] , 
UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> Any: """simple docstring""" return token_ids_a + [self.eos_token_id] def _lowerCAmelCase ( self : str , UpperCamelCase : "Conversation" ) -> List[int]: """simple docstring""" lowerCAmelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. inputs.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = """ """.join(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = self.encode(UpperCamelCase ) if len(UpperCamelCase ) > self.model_max_length: lowerCAmelCase__ : List[str] = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
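A hedged usage sketch for the tokenizer above; "facebook/blenderbot-3B" is the checkpoint named in this file's own maps, and the leading space mirrors the space-prefixing comment in `_build_conversation_input_ids`:

```python
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids  # Blenderbot inputs are space-prefixed
print(tokenizer.decode(ids))
```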
style_context_codestyle: 212
label: 0
code:
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
code_codestyle: 159
style_context:
"""Jaccard similarity coefficient between two sets (or ordered collections)."""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
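With the sets from the `__main__` block, the intersection is {c, d, e} (3 elements) and the union has 8 elements, so the printed value is 3/8:

```python
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375
```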
style_context_codestyle: 9
label: 0
code:
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
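For one concrete parameter combination, the assert above expects the space in the filename to be percent-encoded:

```python
hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
# -> 'https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv'
```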
code_codestyle: 322
style_context:
"""Minimum partition: split a list into two subsets whose sums are as close as
possible and return the smallest achievable difference of the two sums."""


def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True  # the empty subset always reaches sum 0

    for i in range(1, s + 1):
        dp[0][i] = False  # no positive sum is reachable with zero elements

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # sum j reachable without arr[i - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
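A classic check for the routine above: [1, 6, 11, 5] splits into {1, 5, 6} and {11} with sums 12 and 11, so the minimum difference is 1.

```python
assert find_min([1, 6, 11, 5]) == 1
```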
style_context_codestyle: 322
label: 1
code:
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
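A hedged sketch of constructing the config above; the sizes here are made-up tiny values, chosen only to show the `attribute_map` aliasing:

```python
from transformers import CodeGenConfig, CodeGenModel

config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256, rotary_dim=32)  # hypothetical tiny sizes
model = CodeGenModel(config)
print(config.hidden_size)        # 256, resolved through attribute_map -> n_embd
print(config.num_hidden_layers)  # 2, resolved through attribute_map -> n_layer
```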
code_codestyle: 48
style_context:
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
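A hedged sketch of driving the scheduler under test outside the harness (the zero "residual" is a stand-in for a real model's noise prediction):

```python
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for a denoising model's output
    sample = scheduler.step(residual, t, sample).prev_sample
```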
style_context_codestyle: 120
label: 0
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowercase__ = logging.get_logger(__name__) lowercase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } lowercase__ = { """Salesforce/codegen-350M-mono""": 2048, } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["""input_ids""", """attention_mask"""] lowerCamelCase__ = CodeGenTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase=False , **lowercase , ): super().__init__( lowercase , lowercase , tokenizer_file=lowercase , unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , add_prefix_space=lowercase , **lowercase , ) if kwargs.pop('add_bos_token' , lowercase ): _lowerCamelCase : str = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) _lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(lowercase , pre_tok_state.pop('type' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : List[Any] = pre_tok_class(**lowercase ) _lowerCamelCase : str = add_prefix_space def A_ ( self , *lowercase , **lowercase ): _lowerCamelCase : Any = kwargs.get('is_split_into_words' , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): _lowerCamelCase : List[str] = kwargs.get('is_split_into_words' , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase , **lowercase ) def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : Optional[Any] = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase ) def A_ ( self , lowercase , lowercase = False , lowercase = None , lowercase = None , **lowercase , ): _lowerCamelCase : List[Any] = super().decode( token_ids=lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , **lowercase , ) if truncate_before_pattern is not None and len(lowercase ) > 0: _lowerCamelCase : Dict = self.truncate(lowercase , lowercase ) return decoded_text def A_ ( self , lowercase , lowercase ): def find_re(lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = pattern.search(lowercase , lowercase ) return m.start() if m else -1 _lowerCamelCase : Dict = [re.compile(lowercase , re.MULTILINE ) for pattern in truncate_before_pattern] _lowerCamelCase : Tuple = list(re.finditer('^print' , lowercase , re.MULTILINE ) ) if len(lowercase ) > 1: _lowerCamelCase : Dict = completion[: prints[1].start()] _lowerCamelCase : Dict = list(re.finditer('^def' , lowercase , re.MULTILINE ) ) if len(lowercase ) > 1: _lowerCamelCase : Tuple = completion[: defs[1].start()] _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = [ pos for pos in [find_re(lowercase , lowercase , lowercase ) for terminal in terminals] if pos != -1 ] if len(lowercase ) > 0: return completion[: min(lowercase )] else: return completion
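A hedged usage sketch of the `truncate_before_pattern` hook defined above; the checkpoint comes from this file's own map, and the input string is made up:

```python
from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer("def hello():\n    print('hi')\n\n# trailing comment").input_ids
# everything from the first line starting with '#' onward is cut off
print(tokenizer.decode(ids, truncate_before_pattern=[r"^#"]))
```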
12
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=lowercase ) def A_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase , prev_timestep=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCamelCase : str = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : List[str] = 0.5 assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Any = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter _lowerCamelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. 
predict previous mean of sample x_t-1 _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def A_ ( self ): _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(25 ) _lowerCamelCase : Optional[Any] = scheduler.timesteps _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : str = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : List[Any] = model(lowercase , lowercase ) if i + 1 == timesteps.shape[0]: _lowerCamelCase : Optional[int] = None else: _lowerCamelCase : List[str] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step( lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample _lowerCamelCase : List[Any] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def A_ ( self ): pass def A_ ( self ): pass
12
1
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # compare the remaining attributes directly (the bare `result == expected` was a no-op)
            assert result == expected
345
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
345
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
354
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE :Tuple = 16 SCREAMING_SNAKE_CASE :Optional[Any] = 32 def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1_6 , SCREAMING_SNAKE_CASE_ = "bert-base-cased" )-> Optional[int]: """simple docstring""" UpperCamelCase_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = load_dataset("glue" , "mrpc" ) def tokenize_function(SCREAMING_SNAKE_CASE_ ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCamelCase_ = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=SCREAMING_SNAKE_CASE_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(SCREAMING_SNAKE_CASE_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" ) return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. UpperCamelCase_ = DataLoader( tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = DataLoader( tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: """simple docstring""" model.eval() UpperCamelCase_ = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCamelCase_ , UpperCamelCase_ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE_ ) - 1: UpperCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase_ = metric.compute() return eval_metric["accuracy"] def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]: """simple docstring""" UpperCamelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase_ = config["lr"] UpperCamelCase_ = int(config["num_epochs"] ) UpperCamelCase_ = int(config["seed"] ) UpperCamelCase_ = int(config["batch_size"] ) UpperCamelCase_ = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ ) # Instantiate optimizer UpperCamelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCamelCase_ = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) if accelerator.state.deepspeed_plugin is not None: UpperCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: UpperCamelCase_ = 1 UpperCamelCase_ = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCamelCase_ = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , ) else: UpperCamelCase_ = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # We need to keep track of how many total steps we have iterated over UpperCamelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly UpperCamelCase_ = 0 UpperCamelCase_ = evaluate.load("glue" , "mrpc" ) UpperCamelCase_ = num_epochs if args.partial_train_epoch is not None: UpperCamelCase_ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCamelCase_ = args.resume_from_checkpoint.split("epoch_" )[1] UpperCamelCase_ = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCamelCase_ = int(SCREAMING_SNAKE_CASE_ ) + 1 UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.print("resumed checkpoint performance:" , SCREAMING_SNAKE_CASE_ ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , "r" ) as f: UpperCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCamelCase_ = {} for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = outputs.loss UpperCamelCase_ = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCamelCase_ = f"epoch_{epoch}" UpperCamelCase_ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = accuracy UpperCamelCase_ = lr_scheduler.get_lr()[0] UpperCamelCase_ = optimizer.param_groups[0]["lr"] UpperCamelCase_ = epoch UpperCamelCase_ = overall_step accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase( )-> Union[str, Any]: """simple docstring""" UpperCamelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=SCREAMING_SNAKE_CASE_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." 
, required=SCREAMING_SNAKE_CASE_ , ) parser.add_argument( "--output_dir" , type=SCREAMING_SNAKE_CASE_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=SCREAMING_SNAKE_CASE_ , default=2 , help="Number of train epochs." , ) UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
60
0
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _snake_case = 2 class UpperCAmelCase_ : '''simple docstring''' def __init__( self , *, # begin keyword-only arguments __A="<s>" , __A="<pad>" , __A="</s>" , __A="<unk>" , __A=None , ): """simple docstring""" lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = bos, unk, pad, eos lowerCamelCase : str = [] lowerCamelCase : Dict = [] lowerCamelCase : Optional[int] = {} lowerCamelCase : List[str] = self.add_symbol(snake_case_ ) lowerCamelCase : Any = self.add_symbol(snake_case_ ) lowerCamelCase : str = self.add_symbol(snake_case_ ) lowerCamelCase : List[str] = self.add_symbol(snake_case_ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(snake_case_ ) lowerCamelCase : Any = len(self.symbols ) def __eq__( self , __A ): """simple docstring""" return self.indices == other.indices def __getitem__( self , __A ): """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): """simple docstring""" return len(self.symbols ) def __contains__( self , __A ): """simple docstring""" return sym in self.indices @classmethod def _snake_case ( cls , __A ): """simple docstring""" lowerCamelCase : List[str] = cls() d.add_from_file(snake_case_ ) return d def _snake_case ( self , __A , __A=1 , __A=False ): """simple docstring""" if word in self.indices and not overwrite: lowerCamelCase : List[Any] = self.indices[word] lowerCamelCase : Dict = self.count[idx] + n return idx else: lowerCamelCase : str = len(self.symbols ) lowerCamelCase : List[str] = idx self.symbols.append(snake_case_ ) self.count.append(snake_case_ ) return idx def _snake_case ( self , __A ): """simple docstring""" return 0 def _snake_case ( self , __A ): """simple docstring""" if isinstance(snake_case_ , snake_case_ ): try: with open(snake_case_ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(snake_case_ ) ) return lowerCamelCase : List[Any] = f.readlines() lowerCamelCase : Union[str, Any] = self._load_meta(snake_case_ ) for line in lines[indices_start_line:]: try: lowerCamelCase , lowerCamelCase : int = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": lowerCamelCase : str = True lowerCamelCase , lowerCamelCase : Dict = line.rsplit(" " , 1 ) else: lowerCamelCase : int = False lowerCamelCase : Optional[int] = int(snake_case_ ) lowerCamelCase : Any = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(snake_case_ ) ) self.add_symbol(snake_case_ , n=snake_case_ , overwrite=snake_case_ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Optional[int] = dict((re.sub(r"@@$" , "" , lowercase_ ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , lowercase_ ), v) for k, v in d.items() ) lowerCamelCase : Optional[Any] = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] lowerCamelCase : Dict = d[k] # restore return da def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if not os.path.exists(lowercase_ ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models lowerCamelCase : List[str] = os.path.join(lowercase_ , "checkpoint.pt" ) if not os.path.isfile(lowercase_ ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) lowerCamelCase : Union[str, Any] = torch.load(lowercase_ , map_location="cpu" ) lowerCamelCase : Dict = chkpt["cfg"]["model"] # dicts lowerCamelCase : Any = os.path.join(lowercase_ , "dict.txt" ) if not os.path.isfile(lowercase_ ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) lowerCamelCase : Any = Dictionary.load(lowercase_ ) lowerCamelCase : Optional[int] = rewrite_dict_keys(src_dict.indices ) lowerCamelCase : Union[str, Any] = len(lowercase_ ) lowerCamelCase : Tuple = os.path.join(lowercase_ , VOCAB_FILES_NAMES["vocab_file"] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) ) # merges_file (bpecodes) lowerCamelCase : str = os.path.join(lowercase_ , "bpecodes" ) if not os.path.isfile(lowercase_ ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) lowerCamelCase : Optional[int] = os.path.join(lowercase_ , VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(lowercase_ , lowercase_ ) # model config lowerCamelCase : Dict = os.path.join(lowercase_ , "config.json" ) lowerCamelCase : List[str] = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(f"""Generating {biogpt_model_config_file}""" ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) ) # tokenizer config lowerCamelCase : Optional[int] = os.path.join(lowercase_ , lowercase_ ) 
lowerCamelCase : int = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) ) # model lowerCamelCase : Union[str, Any] = chkpt["model"] # remove unneeded keys lowerCamelCase : List[Any] = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(lowercase_ , lowercase_ ) lowerCamelCase : Dict = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): lowerCamelCase : str = model_state_dict.pop(lowercase_ ) else: lowerCamelCase : Optional[Any] = model_state_dict.pop(lowercase_ ) lowerCamelCase : List[Any] = BioGptConfig.from_pretrained(lowercase_ ) lowerCamelCase : Tuple = BioGptForCausalLM(lowercase_ ) # check that it loads ok model_new.load_state_dict(lowercase_ ) # save lowerCamelCase : List[Any] = os.path.join(lowercase_ , lowercase_ ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(lowercase_ , lowercase_ ) print("Conversion is done!" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _snake_case = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
283
"""simple docstring""" import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( ) -> Tuple: from torch.utils.cpp_extension import load A__ = Path(lowercase_ ).resolve().parent.parent.parent / "kernels" / "deformable_detr" A__ = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , lowercase_ , with_cuda=lowercase_ , extra_include_paths=[str(lowercase_ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
247
0
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
359
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: int = AltDiffusionPipeline __magic_name__: Any = TEXT_TO_IMAGE_PARAMS __magic_name__: Any = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__: Any = TEXT_TO_IMAGE_IMAGE_PARAMS __magic_name__: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase_ ( self : List[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) snake_case_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) snake_case_ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) snake_case_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) snake_case_ : Any = CLIPTextModel(_A ) snake_case_ : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) snake_case_ : Dict = 77 snake_case_ : List[Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCAmelCase_ ( self : int , _A : Optional[int] , _A : int=0 ) -> Dict: """simple docstring""" if str(_A ).startswith('mps' ): snake_case_ : Union[str, Any] = torch.manual_seed(_A ) else: snake_case_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A ) snake_case_ : Any = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase_ ( 
self : List[Any] ) -> Dict: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase_ ( self : Dict ) -> Any: """simple docstring""" snake_case_ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ : Any = self.get_dummy_components() torch.manual_seed(0 ) snake_case_ : Any = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder snake_case_ : Optional[Any] = RobertaSeriesModelWithTransformation(_A ) snake_case_ : Optional[Any] = text_encoder snake_case_ : Optional[Any] = AltDiffusionPipeline(**_A ) snake_case_ : List[Any] = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) snake_case_ : Optional[Any] = self.get_dummy_inputs(_A ) snake_case_ : int = 'A photo of an astronaut' snake_case_ : Tuple = alt_pipe(**_A ) snake_case_ : Any = output.images snake_case_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Any = np.array( [0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ : Any = self.get_dummy_components() snake_case_ : List[str] = PNDMScheduler(skip_prk_steps=_A ) torch.manual_seed(0 ) snake_case_ : Optional[int] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder snake_case_ : Tuple = RobertaSeriesModelWithTransformation(_A ) snake_case_ : Any = text_encoder snake_case_ : Tuple = AltDiffusionPipeline(**_A ) snake_case_ : Dict = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) snake_case_ : Dict = self.get_dummy_inputs(_A ) snake_case_ : Tuple = alt_pipe(**_A ) snake_case_ : int = output.images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Optional[int] = np.array( [0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : int ) -> List[str]: """simple docstring""" snake_case_ : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=_A ) snake_case_ : Optional[int] = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) snake_case_ : str = 'A painting of a squirrel eating a burger' snake_case_ : Tuple = torch.manual_seed(0 ) snake_case_ : str = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' ) snake_case_ : Any = output.images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ : Union[str, Any] = 
np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Optional[Any] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' ) snake_case_ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=_A , safety_checker=_A ) snake_case_ : List[str] = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) snake_case_ : List[Any] = 'A painting of a squirrel eating a burger' snake_case_ : int = torch.manual_seed(0 ) snake_case_ : List[Any] = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='numpy' ) snake_case_ : Any = output.images snake_case_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ : List[Any] = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
88
0
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the level set via the DATASETS_VERBOSITY env var if valid, else `_default_log_level`."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
231
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main():
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
231
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _snake_case : List[str] = logging.get_logger(__name__) _snake_case : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _snake_case : Optional[int] = { "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } _snake_case : Optional[Any] = { "google/realm-cc-news-pretrained-embedder": 512, "google/realm-cc-news-pretrained-encoder": 512, "google/realm-cc-news-pretrained-scorer": 512, "google/realm-cc-news-pretrained-openqa": 512, "google/realm-orqa-nq-openqa": 512, "google/realm-orqa-nq-reader": 512, "google/realm-orqa-wq-openqa": 512, "google/realm-orqa-wq-reader": 512, } _snake_case : Dict = { "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } class a (__lowercase ): """simple docstring""" __UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES __UpperCAmelCase : Optional[int] = 
PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[str] = RealmTokenizer def __init__( self : str , lowerCamelCase : str=None , lowerCamelCase : List[str]=None , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[Any]="[UNK]" , lowerCamelCase : str="[SEP]" , lowerCamelCase : Optional[int]="[PAD]" , lowerCamelCase : List[str]="[CLS]" , lowerCamelCase : Any="[MASK]" , lowerCamelCase : List[str]=True , lowerCamelCase : Union[str, Any]=None , **lowerCamelCase : Tuple , ) -> Any: super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase__ ) != tokenize_chinese_chars ): __snake_case : Any = getattr(UpperCAmelCase__ , normalizer_state.pop("type" ) ) __snake_case : Optional[int] = do_lower_case __snake_case : int = strip_accents __snake_case : Optional[Any] = tokenize_chinese_chars __snake_case : Dict = normalizer_class(**UpperCAmelCase__ ) __snake_case : Union[str, Any] = do_lower_case def __snake_case ( self : Any , lowerCamelCase : Dict , **lowerCamelCase : Dict ) -> Optional[int]: __snake_case : Any = PaddingStrategy.MAX_LENGTH __snake_case : Dict = text __snake_case : Any = kwargs.pop("text_pair" , UpperCAmelCase__ ) __snake_case : Tuple = kwargs.pop("return_tensors" , UpperCAmelCase__ ) __snake_case : Any = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(UpperCAmelCase__ ): if batch_text_pair is not None: __snake_case : Optional[int] = batch_text_pair[idx] else: __snake_case : Optional[Any] = None __snake_case : List[Any] = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) __snake_case : List[Any] = encoded_candidates.get("input_ids" ) __snake_case : Dict = encoded_candidates.get("attention_mask" ) __snake_case : List[str] = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(UpperCAmelCase__ ) if encoded_attention_mask is not None: output_data["attention_mask"].append(UpperCAmelCase__ ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(UpperCAmelCase__ ) __snake_case : List[Any] = {key: item for key, item in output_data.items() if len(UpperCAmelCase__ ) != 0} return BatchEncoding(UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def __snake_case ( self : str , lowerCamelCase : Any , lowerCamelCase : Tuple=None ) -> Optional[int]: __snake_case : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]: __snake_case : int = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
361
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
134
0
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
114
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class a__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = '''ylacombe/bark-small''' __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = '''en_speaker_1''' __lowerCamelCase = '''This is a test string''' __lowerCamelCase = '''speaker_embeddings_path.json''' __lowerCamelCase = '''speaker_embeddings''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **a ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BarkProcessor(tokenizer=a ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __lowerCamelCase = 35 __lowerCamelCase = 2 __lowerCamelCase = 8 __lowerCamelCase = { '''semantic_prompt''': np.ones(a ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset __lowerCamelCase = processor(text=self.input_string , voice_preset=a ) __lowerCamelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() ) # test loading voice preset from npz file __lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(a , **a ) __lowerCamelCase = processor(text=self.input_string , voice_preset=a ) __lowerCamelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() ) # test loading voice preset from the hub __lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BarkProcessor(tokenizer=a ) __lowerCamelCase = processor(text=self.input_string ) __lowerCamelCase = tokenizer( self.input_string , padding='''max_length''' , max_length=2_56 , 
add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
67
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
346
'''simple docstring'''

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def _lowerCamelCase ( lowercase : str ) -> Optional[int]:
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowercase )


def _lowerCamelCase ( lowercase : Dict ) -> str:
    from transformers.testing_utils import pytest_terminal_summary_main

    _a = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(lowercase , id=lowercase )
346
1
from __future__ import annotations


def __lowercase ( __lowerCAmelCase : int | str ):
    a__ = str(__lowerCAmelCase )
    return n == n[::-1]


def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
    a__ = 0
    for i in range(1 , __lowerCAmelCase ):
        if is_palindrome(__lowerCAmelCase ) and is_palindrome(bin(__lowerCAmelCase ).split('b' )[1] ):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
240
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

snake_case : List[str] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('''importlib_metadata''')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")


def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=None ):
    require_version(deps[pkg] , __lowerCAmelCase )
240
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''spiece.model'''} __UpperCAmelCase = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } __UpperCAmelCase = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) __UpperCAmelCase = 0 __UpperCAmelCase = 1 __UpperCAmelCase = 2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 class lowerCamelCase__ ( _a ): _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = '''left''' def __init__( self : Dict , _a : List[Any] , _a : Any=False , _a : int=True , _a : Union[str, Any]=False , _a : Dict="<s>" , _a : str="</s>" , _a : Optional[int]="<unk>" , _a : Union[str, Any]="<sep>" , _a : List[Any]="<pad>" , _a : Optional[Any]="<cls>" , _a : str="<mask>" , _a : Any=["<eop>", "<eod>"] , _a : Optional[Dict[str, Any]] = None , **_a : Optional[int] , ): # Mask token behave like a normal word, i.e. include the space before it a__: Dict =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token a__: Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) a__: Dict =3 a__: Tuple =do_lower_case a__: int =remove_space a__: List[Any] =keep_accents a__: List[str] =vocab_file a__: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def _lowerCamelCase ( self : Any ): return len(self.sp_model ) def _lowerCamelCase ( self : List[Any] ): a__: Dict ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): a__: Dict =self.__dict__.copy() a__: List[Any] =None return state def __setstate__( self : Optional[Any] , _a : Tuple ): a__: List[Any] =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a__: List[str] ={} a__: int =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self : Dict , _a : str ): if self.remove_space: a__: Optional[int] =" ".join(inputs.strip().split() ) else: a__: Optional[int] =inputs a__: Dict =outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: a__: Optional[int] =unicodedata.normalize("NFKD" , _a ) a__: int ="".join([c for c in outputs if not unicodedata.combining(_a )] ) if self.do_lower_case: a__: Dict =outputs.lower() return outputs def _lowerCamelCase ( self : List[Any] , _a : str ): a__: Dict =self.preprocess_text(_a ) a__: Dict =self.sp_model.encode(_a , out_type=_a ) a__: str =[] for piece in pieces: if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): a__: Optional[Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if 
len(cur_pieces[0] ) == 1: a__: Optional[int] =cur_pieces[1:] else: a__: Tuple =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_a ) else: new_pieces.append(_a ) return new_pieces def _lowerCamelCase ( self : Dict , _a : Dict ): return self.sp_model.PieceToId(_a ) def _lowerCamelCase ( self : Dict , _a : Optional[Any] ): return self.sp_model.IdToPiece(_a ) def _lowerCamelCase ( self : Optional[Any] , _a : Tuple ): a__: Tuple ="".join(_a ).replace(_a , " " ).strip() return out_string def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : bool = False , _a : bool = None , _a : bool = True , **_a : Union[str, Any] , ): a__: Optional[int] =kwargs.pop("use_source_tokenizer" , _a ) a__: Any =self.convert_ids_to_tokens(_a , skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 a__: List[str] =[] a__: Any =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) a__: List[str] =[] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens a__: Union[str, Any] ="".join(_a ) a__: List[Any] =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: a__: Optional[int] =self.clean_up_tokenization(_a ) return clean_text else: return text def _lowerCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ): a__: Dict =[self.sep_token_id] a__: Optional[Any] =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is not None: return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1] return ([0] * len(_a )) + [1, 1] def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ): a__: Any =[self.sep_token_id] a__: List[Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCamelCase ( self : List[str] , _a : str , _a : Optional[str] = None ): if not os.path.isdir(_a ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return a__: List[Any] =os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , "wb" ) as fi: a__: Optional[Any] =self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,)
42
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class lowerCamelCase__ ( _a ):
    _lowerCAmelCase = '''mobilenet_v1'''

    def __init__( self : int , _a : Tuple=3 , _a : str=2_2_4 , _a : Dict=1.0 , _a : List[Any]=8 , _a : Tuple="relu6" , _a : Dict=True , _a : Optional[int]=0.9_9_9 , _a : List[Any]=0.0_2 , _a : Optional[Any]=0.0_0_1 , **_a : Optional[int] , ):
        super().__init__(**_a )

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        a__: str =num_channels
        a__: Union[str, Any] =image_size
        a__: Dict =depth_multiplier
        a__: Union[str, Any] =min_depth
        a__: Any =hidden_act
        a__: int =tf_padding
        a__: Dict =classifier_dropout_prob
        a__: Any =initializer_range
        a__: List[str] =layer_norm_eps


class lowerCamelCase__ ( _a ):
    _lowerCAmelCase = version.parse('''1.11''' )

    @property
    def _lowerCamelCase ( self : int ):
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def _lowerCamelCase ( self : Tuple ):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def _lowerCamelCase ( self : Dict ):
        return 1e-4
42
1
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ = logging.get_logger(__name__)

a_ = {
    """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}


class _UpperCamelCase ( _UpperCAmelCase ):
    '''simple docstring'''

    lowerCamelCase__ ='autoformer'
    lowerCamelCase__ ={
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__( self : Tuple , a : Any = None , a : Optional[int] = None , a : List[Any] = "student_t" , a : Dict = "nll" , a : str = 1 , a : Dict = [1, 2, 3, 4, 5, 6, 7] , a : List[str] = True , a : str = 0 , a : Dict = 0 , a : str = 0 , a : str = 0 , a : Tuple = None , a : Dict = None , a : Dict = 64 , a : Optional[int] = 2 , a : List[str] = 2 , a : Optional[Any] = 2 , a : Optional[int] = 2 , a : Tuple = 32 , a : Optional[int] = 32 , a : Any = "gelu" , a : Tuple = 0.1 , a : int = 0.1 , a : Dict = 0.1 , a : Optional[Any] = 0.1 , a : str = 0.1 , a : Optional[int] = 100 , a : List[str] = 0.02 , a : Tuple = True , a : Any=True , a : Optional[Any] = 10 , a : List[str] = 25 , a : Optional[int] = 3 , **a : Union[str, Any] , ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = prediction_length
        SCREAMING_SNAKE_CASE : int = context_length if context_length is not None else prediction_length
        SCREAMING_SNAKE_CASE : str = distribution_output
        SCREAMING_SNAKE_CASE : Union[str, Any] = loss
        SCREAMING_SNAKE_CASE : Optional[int] = input_size
        SCREAMING_SNAKE_CASE : str = num_time_features
        SCREAMING_SNAKE_CASE : Optional[int] = lags_sequence
        SCREAMING_SNAKE_CASE : List[str] = scaling
        SCREAMING_SNAKE_CASE : Any = num_dynamic_real_features
        SCREAMING_SNAKE_CASE : Optional[int] = num_static_real_features
        SCREAMING_SNAKE_CASE : Tuple = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            SCREAMING_SNAKE_CASE : List[Any] = cardinality
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            SCREAMING_SNAKE_CASE : Dict = embedding_dimension
        else:
            SCREAMING_SNAKE_CASE : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        SCREAMING_SNAKE_CASE : str = num_parallel_samples

        # Transformer architecture configuration
        SCREAMING_SNAKE_CASE : Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
        SCREAMING_SNAKE_CASE : int = d_model
        SCREAMING_SNAKE_CASE : str = encoder_attention_heads
        SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
        SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
        SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim
        SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
        SCREAMING_SNAKE_CASE : Tuple = decoder_layers
        SCREAMING_SNAKE_CASE : Optional[Any] = dropout
        SCREAMING_SNAKE_CASE : Any = attention_dropout
        SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout
        SCREAMING_SNAKE_CASE : Any = encoder_layerdrop
        SCREAMING_SNAKE_CASE : Optional[Any] = decoder_layerdrop
        SCREAMING_SNAKE_CASE : int = activation_function
        SCREAMING_SNAKE_CASE : str = init_std
        SCREAMING_SNAKE_CASE : Dict = use_cache

        # Autoformer
        SCREAMING_SNAKE_CASE : Optional[Any] = label_length
        SCREAMING_SNAKE_CASE : List[str] = moving_average
        SCREAMING_SNAKE_CASE : Dict = autocorrelation_factor

        super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    @property
    def __UpperCamelCase ( self : str ) -> int:
        """simple docstring"""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
76
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A__ : List[Any] = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : int = [
        """MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MegatronBertForCausalLM""",
        """MegatronBertForMaskedLM""",
        """MegatronBertForMultipleChoice""",
        """MegatronBertForNextSentencePrediction""",
        """MegatronBertForPreTraining""",
        """MegatronBertForQuestionAnswering""",
        """MegatronBertForSequenceClassification""",
        """MegatronBertForTokenClassification""",
        """MegatronBertModel""",
        """MegatronBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    A__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
185
0
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[int]: __snake_case: Any = int(lowerCamelCase__) __snake_case , __snake_case , __snake_case: str = t // 3600, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=300) -> List[Any]: return F'''\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n ''' def A__ ( SCREAMING_SNAKE_CASE__) -> str: __snake_case: Optional[int] = """<table border=\"1\" class=\"dataframe\">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __snake_case: Optional[int] = F'''{elt:.6f}''' if isinstance(lowerCamelCase__ , lowerCamelCase__) else str(lowerCamelCase__) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __snake_case : lowerCAmelCase__ = 5 lowerCAmelCase__ = 0.2 def __init__( self : Union[str, Any] , A : Tuple , A : str = None , A : str = True , A : Any = None , A : List[str] = 300 , ): __snake_case: Union[str, Any] = total __snake_case: List[str] = """""" if prefix is None else prefix __snake_case: Optional[Any] = leave __snake_case: Dict = parent __snake_case: str = width __snake_case: Optional[Any] = None __snake_case: Any = None __snake_case: List[str] = None def UpperCAmelCase__ ( self : Dict , A : List[Any] , A : Any = False , A : int = None ): __snake_case: List[str] = value if comment is not None: __snake_case: List[str] = comment if self.last_value is None: __snake_case: str = time.time() __snake_case: Tuple = value __snake_case: str = None __snake_case: Union[str, Any] = self.warmup __snake_case: Union[str, Any] = 1 self.update_bar(A ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __snake_case: List[str] = time.time() __snake_case: List[str] = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: __snake_case: int = self.elapsed_time / (value - self.start_value) else: __snake_case: Dict = None if value >= self.total: __snake_case: Optional[Any] = self.total __snake_case: Union[str, Any] = None if not self.leave: self.close() elif self.average_time_per_item is not None: __snake_case: Optional[Any] = self.average_time_per_item * (self.total - value) self.update_bar(A ) __snake_case: int = value __snake_case: Union[str, Any] = current_time if self.average_time_per_item is None: __snake_case: Union[str, Any] = 1 else: __snake_case: str = max(int(self.update_every / self.average_time_per_item ) , 1 ) def UpperCAmelCase__ ( self : List[Any] , A : Tuple , A : str=None ): __snake_case: str = """ """ * (len(str(self.total ) ) - len(str(A ) )) + str(A ) if self.elapsed_time is None: __snake_case: str = f'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: __snake_case: Union[str, Any] = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: __snake_case: Optional[int] = ( f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' f''' {format_time(self.predicted_remaining )}''' ) self.label += f''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]''' self.display() def UpperCAmelCase__ ( self : Tuple ): __snake_case: Optional[int] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __snake_case: Dict = disp.display(disp.HTML(self.html_code ) , display_id=A ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCAmelCase__ ( self : int ): if self.parent is None and self.output is not None: self.output.update(disp.HTML("""""" ) ) class __snake_case ( a_ ): def __init__( self : Dict , A : Dict , A : Optional[Any]=None ): super().__init__(A ) __snake_case: Optional[int] = None if column_names is None else [column_names] __snake_case: Optional[int] = None def UpperCAmelCase__ ( self : List[str] ): __snake_case: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __snake_case: List[str] = disp.display(disp.HTML(self.html_code ) , display_id=A ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCAmelCase__ ( self : Tuple , A : Any ): if self.inner_table is None: __snake_case: str = [list(values.keys() ), list(values.values() )] else: __snake_case: Dict = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(A ) __snake_case: Any = columns self.inner_table.append([values[c] for c in columns] ) def UpperCAmelCase__ ( self : Union[str, Any] , A : List[Any] , A : Union[str, Any]=None , A : Any=300 ): __snake_case: str = NotebookProgressBar(A , prefix=A , parent=self , width=A ) return self.child_bar def UpperCAmelCase__ ( self : Union[str, Any] ): __snake_case: Dict = None self.display() class __snake_case ( a_ ): def __init__( self : Any ): __snake_case: Optional[Any] = None __snake_case: List[str] = None __snake_case: int = False def UpperCAmelCase__ ( self : Optional[int] 
, A : Tuple , A : List[Any] , A : int , **A : Tuple ): __snake_case: List[Any] = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step""" __snake_case: Optional[Any] = 0 __snake_case: Dict = 0 __snake_case: Optional[int] = [self.first_column] + ["""Training Loss"""] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("""Validation Loss""" ) __snake_case: Tuple = NotebookTrainingTracker(state.max_steps , A ) def UpperCAmelCase__ ( self : List[Any] , A : Tuple , A : Tuple , A : Optional[int] , **A : Any ): __snake_case: int = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) __snake_case: Union[str, Any] = False def UpperCAmelCase__ ( self : Tuple , A : Any , A : Dict , A : Union[str, Any] , A : Any=None , **A : int ): if not has_length(A ): return if self.prediction_bar is None: if self.training_tracker is not None: __snake_case: List[Any] = self.training_tracker.add_child(len(A ) ) else: __snake_case: str = NotebookProgressBar(len(A ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] , A : int , A : Any , **A : Optional[int] ): if self.prediction_bar is not None: self.prediction_bar.close() __snake_case: Union[str, Any] = None def UpperCAmelCase__ ( self : List[str] , A : List[str] , A : int , A : Tuple , A : Optional[int]=None , **A : List[str] ): if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __snake_case: str = {"""Training Loss""": logs["""loss"""]} # First column is necessarily Step sine we're not in epoch eval strategy __snake_case: Optional[int] = state.global_step self.training_tracker.write_line(A ) def UpperCAmelCase__ ( self : Optional[int] , A : Dict , A : str , A : List[Any] , A : int=None , **A : Any ): if self.training_tracker is not None: __snake_case: List[str] = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""} for log in reversed(state.log_history ): if "loss" in log: __snake_case: Optional[int] = log["""loss"""] break if self.first_column == "Epoch": __snake_case: Union[str, Any] = int(state.epoch ) else: __snake_case: int = state.global_step __snake_case: Optional[int] = """eval""" for k in metrics: if k.endswith("""_loss""" ): __snake_case: str = re.sub(r"""\_loss$""" , """""" , A ) __snake_case: Union[str, Any] = metrics.pop("""total_flos""" , A ) __snake_case: Any = metrics.pop("""epoch""" , A ) __snake_case: List[Any] = metrics.pop(f'''{metric_key_prefix}_runtime''' , A ) __snake_case: Optional[Any] = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , A ) __snake_case: Any = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , A ) __snake_case: int = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , A ) for k, v in metrics.items(): if k == f'''{metric_key_prefix}_loss''': __snake_case: List[str] = v else: __snake_case: Optional[int] = k.split("""_""" ) __snake_case: str = """ """.join([part.capitalize() for part in splits[1:]] ) __snake_case: int = v self.training_tracker.write_line(A ) self.training_tracker.remove_child() __snake_case: Tuple = None # Evaluation takes a long time so we should force the next update. 
__snake_case: List[str] = True def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : List[str] , A : Optional[Any] , **A : int ): self.training_tracker.update( state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=A ) __snake_case: str = None
350
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __snake_case ( __lowerCamelCase ): '''simple docstring''' def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ): super().__init__(*A , **A ) __snake_case: List[Any] = eval_examples __snake_case: str = post_process_function def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ): __snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case: Any = self.get_eval_dataloader(A ) __snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case: Union[str, Any] = self.compute_metrics __snake_case: List[str] = None __snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __snake_case: Tuple = time.time() try: __snake_case: Any = eval_loop( A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __snake_case: Optional[int] = compute_metrics __snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case: List[str] = self.post_process_function(A , A , output.predictions ) __snake_case: List[Any] = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __snake_case: str = metrics.pop(A ) metrics.update(output.metrics ) else: __snake_case: List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A ) return metrics def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ): __snake_case: Optional[Any] = self.get_test_dataloader(A ) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case: Optional[int] = self.compute_metrics __snake_case: List[Any] = None __snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __snake_case: Dict = time.time() try: __snake_case: str = eval_loop( A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __snake_case: List[Any] = compute_metrics __snake_case: Dict = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" ) __snake_case: str = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __snake_case: List[str] = metrics.pop(A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
293
0
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
    """simple docstring"""
    if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    a :Optional[Any] = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(UpperCAmelCase_ )

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
94
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
    """simple docstring"""
    __lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
    __lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
    __lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]

    for i in range(1 , n + 1 ):
        __lowerCAmelCase: Tuple = True

    for i in range(1 , s + 1 ):
        __lowerCAmelCase: Any = False

    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            __lowerCAmelCase: Optional[int] = dp[i][j - 1]

            if arr[i - 1] <= j:
                __lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            __lowerCAmelCase: Tuple = s - 2 * j
            break

    return diff
322
0
from collections.abc import Sequence


def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Sequence[int] | None = None ) -> int:
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )

    __lowerCAmelCase : Optional[Any] = nums[0]
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        __lowerCAmelCase : List[str] = nums[i]
        __lowerCAmelCase : Any = max(SCREAMING_SNAKE_CASE , ans + num , SCREAMING_SNAKE_CASE )

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    _UpperCAmelCase = int(input('Enter number of elements : ').strip())
    _UpperCAmelCase = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
232
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class snake_case_ ( unittest.TestCase ): def __init__( self : List[Any] , _snake_case : List[Any] , _snake_case : str=13 , _snake_case : int=30 , _snake_case : str=2 , _snake_case : int=3 , _snake_case : Optional[Any]=True , _snake_case : str=True , _snake_case : Optional[int]=32 , _snake_case : Dict=5 , _snake_case : Optional[int]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]="gelu" , _snake_case : str=0.1 , _snake_case : str=0.1 , _snake_case : str=10 , _snake_case : Any=0.02 , )->Tuple: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Any = batch_size __lowerCAmelCase : int = image_size __lowerCAmelCase : int = patch_size __lowerCAmelCase : List[Any] = num_channels __lowerCAmelCase : str = is_training __lowerCAmelCase : str = use_labels __lowerCAmelCase : List[str] = hidden_size __lowerCAmelCase : Dict = num_hidden_layers __lowerCAmelCase : List[str] = num_attention_heads __lowerCAmelCase : Any = intermediate_size __lowerCAmelCase : List[str] = hidden_act __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : Tuple = type_sequence_label_size __lowerCAmelCase : Union[str, Any] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase : Tuple = (image_size // patch_size) ** 2 __lowerCAmelCase : Optional[Any] = num_patches + 1 def UpperCAmelCase__ ( self : Any )->Any: '''simple docstring''' __lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : Tuple = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCAmelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : List[Any] )->Any: '''simple docstring''' __lowerCAmelCase : str = FlaxViTModel(config=_snake_case ) __lowerCAmelCase : int = model(_snake_case ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase : Dict = (self.image_size, self.image_size) __lowerCAmelCase : Any = (self.patch_size, self.patch_size) __lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , _snake_case : str , _snake_case : List[Any] )->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Tuple = self.type_sequence_label_size __lowerCAmelCase : Tuple = FlaxViTForImageClassification(config=_snake_case ) __lowerCAmelCase : List[str] = model(_snake_case ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase : str = 1 __lowerCAmelCase : Any = FlaxViTForImageClassification(_snake_case ) __lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase : Dict = model(_snake_case ) def UpperCAmelCase__ ( self : str )->Any: '''simple docstring''' __lowerCAmelCase : Any = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : Union[str, Any] = config_and_inputs __lowerCAmelCase : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class snake_case_ ( __lowercase ,unittest.TestCase ): A_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase__ ( self : str )->None: '''simple docstring''' __lowerCAmelCase : List[str] = FlaxViTModelTester(self ) __lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def UpperCAmelCase__ ( self : Any )->Dict: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any )->Any: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase__ ( self : Union[str, Any] )->str: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def UpperCAmelCase__ ( self : int )->int: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : int = model_class(_snake_case ) __lowerCAmelCase : Optional[int] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : Tuple = [*signature.parameters.keys()] __lowerCAmelCase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _snake_case ) def UpperCAmelCase__ ( self : str )->str: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCAmelCase : Optional[int] = self._prepare_for_class(_snake_case , _snake_case ) __lowerCAmelCase : List[str] = model_class(_snake_case ) @jax.jit def model_jitted(_snake_case : Dict , **_snake_case : Union[str, Any] ): return model(pixel_values=_snake_case , **_snake_case ) with self.subTest("""JIT Enabled""" ): __lowerCAmelCase : List[Any] = model_jitted(**_snake_case ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __lowerCAmelCase : str = model_jitted(**_snake_case ).to_tuple() self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for jitted_output, output in zip(_snake_case , _snake_case ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase__ ( self : Dict )->str: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCAmelCase : List[str] = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) __lowerCAmelCase : List[str] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(_snake_case )
232
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __A : Any = "blip_2_vision_model"

    def __init__( self : Tuple , lowercase_ : Optional[int]=14_08 , lowercase_ : Any=61_44 , lowercase_ : List[str]=39 , lowercase_ : Tuple=16 , lowercase_ : Optional[int]=2_24 , lowercase_ : str=14 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.0_00_01 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=1E-10 , lowercase_ : List[Any]=True , **lowercase_ : int , ) -> List[Any]:
        super().__init__(**lowercase_ )

        lowercase__ : Optional[int] = hidden_size
        lowercase__ : int = intermediate_size
        lowercase__ : Union[str, Any] = num_hidden_layers
        lowercase__ : Dict = num_attention_heads
        lowercase__ : List[str] = patch_size
        lowercase__ : Optional[Any] = image_size
        lowercase__ : List[str] = initializer_range
        lowercase__ : List[Any] = attention_dropout
        lowercase__ : Union[str, Any] = layer_norm_eps
        lowercase__ : int = hidden_act
        lowercase__ : Any = qkv_bias

    @classmethod
    def __UpperCamelCase ( cls : Union[str, Any] , lowercase_ : str , **lowercase_ : Union[str, Any] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )

        lowercase__ , lowercase__ : Any = cls.get_config_dict(lowercase_ , **lowercase_ )

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            lowercase__ : List[Any] = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(lowercase_ , **lowercase_ )


class snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __A : Optional[Any] = "blip_2_qformer"

    def __init__( self : int , lowercase_ : int=3_05_22 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : str=12 , lowercase_ : List[str]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=5_12 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=1E-12 , lowercase_ : List[str]=0 , lowercase_ : Dict="absolute" , lowercase_ : Any=2 , lowercase_ : int=14_08 , **lowercase_ : int , ) -> List[str]:
        super().__init__(pad_token_id=lowercase_ , **lowercase_ )

        lowercase__ : Tuple = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : Any = num_hidden_layers
        lowercase__ : Optional[int] = num_attention_heads
        lowercase__ : str = hidden_act
        lowercase__ : Any = intermediate_size
        lowercase__ : Dict = hidden_dropout_prob
        lowercase__ : Optional[Any] = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : Dict = initializer_range
        lowercase__ : List[Any] = layer_norm_eps
        lowercase__ : int = position_embedding_type
        lowercase__ : str = cross_attention_frequency
        lowercase__ : Optional[int] = encoder_hidden_size

    @classmethod
    def __UpperCamelCase ( cls : Dict , lowercase_ : List[Any] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )

        lowercase__ , lowercase__ : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            lowercase__ : List[str] = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(lowercase_ , **lowercase_ )


class snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __A : int = "blip-2"
    __A : Dict = True

    def __init__( self : List[Any] , lowercase_ : Tuple=None , lowercase_ : int=None , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=32 , **lowercase_ : Optional[int] ) -> Tuple:
        super().__init__(**lowercase_ )

        if vision_config is None:
            lowercase__ : int = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )

        if qformer_config is None:
            lowercase__ : Dict = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )

        if text_config is None:
            lowercase__ : Union[str, Any] = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )

        lowercase__ : int = BlipaVisionConfig(**lowercase_ )
        lowercase__ : Optional[int] = BlipaQFormerConfig(**lowercase_ )
        lowercase__ : int = text_config["model_type"] if "model_type" in text_config else "opt"
        lowercase__ : Optional[Any] = CONFIG_MAPPING[text_model_type](**lowercase_ )

        lowercase__ : Optional[int] = self.text_config.tie_word_embeddings
        lowercase__ : Tuple = self.text_config.is_encoder_decoder

        lowercase__ : Union[str, Any] = num_query_tokens
        lowercase__ : int = self.vision_config.hidden_size
        lowercase__ : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        lowercase__ : int = 1.0
        lowercase__ : Any = 0.02

    @classmethod
    def __UpperCamelCase ( cls : Dict , lowercase_ : str , lowercase_ : Any , lowercase_ : Optional[Any] , **lowercase_ : Tuple , ) -> int:
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )

    def __UpperCamelCase ( self : int ) -> List[str]:
        lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
        lowercase__ : Any = self.vision_config.to_dict()
        lowercase__ : str = self.qformer_config.to_dict()
        lowercase__ : int = self.text_config.to_dict()
        lowercase__ : Dict = self.__class__.model_type
        return output
87
'''simple docstring'''

import numpy as np


class a:
    def __init__( self ) -> List[str]:
        _a = (0, 0)
        _a = None
        _a = 0
        _a = 0
        _a = 0

    def __eq__( self , __magic_name__ ) -> Optional[int]:
        return self.position == cell.position

    def __UpperCAmelCase ( self ) -> Any:
        print(self.position )


class a:
    def __init__( self , __magic_name__=(5, 5) ) -> Optional[int]:
        _a = np.zeros(__magic_name__ )
        _a = world_size[0]
        _a = world_size[1]

    def __UpperCAmelCase ( self ) -> List[Any]:
        print(self.w )

    def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
        _a = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        _a = cell.position[0]
        _a = cell.position[1]
        _a = []
        for n in neughbour_cord:
            _a = current_x + n[0]
            _a = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                _a = Cell()
                _a = (x, y)
                _a = cell
                neighbours.append(__magic_name__ )
        return neighbours


def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ) -> List[str]:
    '''simple docstring'''
    _a = []
    _a = []

    _open.append(lowerCAmelCase__ )
    while _open:
        _a = np.argmin([n.f for n in _open] )
        _a = _open[min_f]
        _closed.append(_open.pop(lowerCAmelCase__ ) )
        if current == goal:
            break
        for n in world.get_neigbours(lowerCAmelCase__ ):
            for c in _closed:
                if c == n:
                    continue
            _a = current.g + 1
            _a , _a = n.position
            _a , _a = goal.position
            _a = (ya - ya) ** 2 + (xa - xa) ** 2
            _a = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(lowerCAmelCase__ )
    _a = []
    while current.parent is not None:
        path.append(current.position )
        _a = current.parent
    path.append(current.position )
    return path[::-1]


if __name__ == "__main__":
    a_ : str = Gridworld()

    # Start position and goal
    a_ : str = Cell()
    a_ : Dict = (0, 0)
    a_ : Dict = Cell()
    a_ : Optional[Any] = (4, 4)
    print(f'''path from {start.position} to {goal.position}''')
    a_ : Tuple = astar(world, start, goal)

    # Just for visual reasons.
    for i in s:
        a_ : Any = 1
    print(world.w)
168
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): @register_to_config def __init__( self : List[Any], lowerCAmelCase : int = 768, ) -> int: super().__init__() lowercase : int = nn.Parameter(torch.zeros(1, lowerCAmelCase ) ) lowercase : str = nn.Parameter(torch.ones(1, lowerCAmelCase ) ) def lowercase ( self : List[str], lowerCAmelCase : Optional[Union[str, torch.device]] = None, lowerCAmelCase : Optional[torch.dtype] = None, ) -> Optional[int]: lowercase : Any = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) ) lowercase : Optional[Any] = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) ) return self def lowercase ( self : Dict, lowerCAmelCase : Union[str, Any] ) -> int: lowercase : str = (embeds - self.mean) * 1.0 / self.std return embeds def lowercase ( self : int, lowerCAmelCase : Tuple ) -> str: lowercase : Any = (embeds * self.std) + self.mean return embeds
353
"""simple docstring""" import datasets from .evaluate import evaluate _UpperCamelCase: str = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' _UpperCamelCase: int = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' _UpperCamelCase: Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def lowercase ( self : List[str] ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': { 'id': datasets.Value('string' ), 'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ), }, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ), codebase_urls=['https://www.atticusprojectai.org/cuad'], reference_urls=['https://www.atticusprojectai.org/cuad'], ) def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Optional[Any] ) 
-> Optional[Any]: lowercase : int = {prediction['id']: prediction['prediction_text'] for prediction in predictions} lowercase : Any = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] lowercase : int = evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
53
0
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType lowercase__ : List[str] = logging.get_logger(__name__) class __lowerCAmelCase ( _lowercase ): """simple docstring""" _snake_case : Union[str, Any] = 'vision-encoder-decoder' _snake_case : Tuple = True def __init__( self : Optional[int] , **lowerCAmelCase__ : Optional[Any] ) -> int: '''simple docstring''' super().__init__(**__a ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) _UpperCamelCase = kwargs.pop('''encoder''' ) _UpperCamelCase = encoder_config.pop('''model_type''' ) _UpperCamelCase = kwargs.pop('''decoder''' ) _UpperCamelCase = decoder_config.pop('''model_type''' ) _UpperCamelCase = AutoConfig.for_model(__a , **__a ) _UpperCamelCase = AutoConfig.for_model(__a , **__a ) _UpperCamelCase = True @classmethod def snake_case__ ( cls : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Dict ) -> PretrainedConfig: '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) _UpperCamelCase = True _UpperCamelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__a ) def snake_case__ ( self : List[str] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__ ) _UpperCamelCase = self.encoder.to_dict() _UpperCamelCase = self.decoder.to_dict() _UpperCamelCase = self.__class__.model_type return output class __lowerCAmelCase ( _lowercase ): """simple docstring""" _snake_case : Dict = version.parse('1.11' ) @property def snake_case__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def snake_case__ ( self : Dict ) -> float: '''simple docstring''' return 1e-4 @property def snake_case__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __lowerCAmelCase ( _lowercase ): """simple docstring""" @property def snake_case__ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' _UpperCamelCase = OrderedDict() _UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} _UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} _UpperCamelCase = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = False , lowerCAmelCase__ : List[str] = None , ) -> Mapping[str, Any]: '''simple docstring''' import torch _UpperCamelCase = OrderedDict() _UpperCamelCase = super().generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) _UpperCamelCase , _UpperCamelCase = dummy_input['''input_ids'''].shape _UpperCamelCase = (batch, 
encoder_sequence, self._config.encoder_hidden_size) _UpperCamelCase = dummy_input.pop('''input_ids''' ) _UpperCamelCase = dummy_input.pop('''attention_mask''' ) _UpperCamelCase = torch.zeros(__a ) return common_inputs class __lowerCAmelCase ( _lowercase ): """simple docstring""" @property def snake_case__ ( self : str ) -> None: '''simple docstring''' pass def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] ) -> OnnxConfig: '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(__a ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple = "default" ) -> OnnxConfig: '''simple docstring''' _UpperCamelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__a , __a )
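The cell above is the composite vision-encoder-decoder configuration with its ONNX configs. As a hedged illustration of the same composition pattern with de-obfuscated names, a minimal sketch; the concrete model types "vit" and "gpt2" are illustrative assumptions, not part of the row:

from transformers import AutoConfig, VisionEncoderDecoderConfig

# Build the two sub-configurations, then compose them; the classmethod
# mirrors the `from_encoder_decoder_configs` pattern in the cell above.
encoder_cfg = AutoConfig.for_model("vit")   # assumed encoder type
decoder_cfg = AutoConfig.for_model("gpt2")  # assumed decoder type
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
# The decoder is flagged for cross-attention, as the logged message above states.
assert config.decoder.is_decoder and config.decoder.add_cross_attention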
324
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCamelCase ( _lowercase ): def __init__(self , __a , __a ) -> str: super().__init__() self.register_modules(unet=__a , scheduler=__a ) @torch.no_grad() def __call__(self , __a = 1 , __a = 1_00 , __a = None , __a = None , __a = True , ) -> Union[AudioPipelineOutput, Tuple]: if audio_length_in_s is None: UpperCamelCase = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"{audio_length_in_s} is too small. Make sure it's bigger or equal to" F" {3 * down_scale_factor / self.unet.config.sample_rate}." ) UpperCamelCase = int(__a ) if sample_size % down_scale_factor != 0: UpperCamelCase = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) UpperCamelCase = int(__a ) UpperCamelCase = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__a , __a ) and len(__a ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(__a )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." ) UpperCamelCase = randn_tensor(__a , generator=__a , device=self.device , dtype=__a ) # set step values self.scheduler.set_timesteps(__a , device=audio.device ) UpperCamelCase = self.scheduler.timesteps.to(__a ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase = self.unet(__a , __a ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase = self.scheduler.step(__a , __a , __a ).prev_sample UpperCamelCase = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__a )
153
0
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a_ ( snake_case__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ = ConsistencyModelPipeline UpperCAmelCase_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCAmelCase_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt UpperCAmelCase_ = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def __snake_case ( self : Dict): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def __snake_case ( self : str): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def __snake_case ( self : List[str] , lowercase__ : int=False): '''simple docstring''' if class_cond: lowerCAmelCase__ = self.dummy_cond_unet else: lowerCAmelCase__ = self.dummy_uncond_unet # Default to CM multistep sampler lowerCAmelCase__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowerCAmelCase__ = { "unet": unet, "scheduler": scheduler, } return components def __snake_case ( self : int , lowercase__ : Optional[int] , lowercase__ : Optional[int]=0): '''simple docstring''' if str(UpperCAmelCase_).startswith('mps'): lowerCAmelCase__ = torch.manual_seed(UpperCAmelCase_) else: lowerCAmelCase__ = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_) lowerCAmelCase__ = { "batch_size": 1, "num_inference_steps": None, "timesteps": [22, 0], "generator": generator, "output_type": "np", } return inputs def __snake_case ( self : Union[str, Any]): '''simple docstring''' lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = ConsistencyModelPipeline(**UpperCAmelCase_) lowerCAmelCase__ = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_dummy_inputs(UpperCAmelCase_) lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def __snake_case ( self : Optional[int]): '''simple docstring''' lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components(class_cond=UpperCAmelCase_) lowerCAmelCase__ = ConsistencyModelPipeline(**UpperCAmelCase_) lowerCAmelCase__ = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_dummy_inputs(UpperCAmelCase_) lowerCAmelCase__ = 0 lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 32, 32, 3) 
lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def __snake_case ( self : Union[str, Any]): '''simple docstring''' lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = ConsistencyModelPipeline(**UpperCAmelCase_) lowerCAmelCase__ = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_dummy_inputs(UpperCAmelCase_) lowerCAmelCase__ = 1 lowerCAmelCase__ = None lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def __snake_case ( self : Any): '''simple docstring''' lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components(class_cond=UpperCAmelCase_) lowerCAmelCase__ = ConsistencyModelPipeline(**UpperCAmelCase_) lowerCAmelCase__ = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_dummy_inputs(UpperCAmelCase_) lowerCAmelCase__ = 1 lowerCAmelCase__ = None lowerCAmelCase__ = 0 lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : int): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Tuple , lowercase__ : Optional[Any]=0 , lowercase__ : int=False , lowercase__ : List[Any]="cpu" , lowercase__ : List[Any]=torch.floataa , lowercase__ : int=(1, 3, 64, 64)): '''simple docstring''' lowerCAmelCase__ = torch.manual_seed(UpperCAmelCase_) lowerCAmelCase__ = { "num_inference_steps": None, "timesteps": [22, 0], "class_labels": 0, "generator": generator, "output_type": "np", } if get_fixed_latents: lowerCAmelCase__ = self.get_fixed_latents(seed=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ , shape=UpperCAmelCase_) lowerCAmelCase__ = latents return inputs def __snake_case ( self : str , lowercase__ : Dict=0 , lowercase__ : List[Any]="cpu" , lowercase__ : Tuple=torch.floataa , lowercase__ : Dict=(1, 3, 64, 64)): '''simple docstring''' if type(UpperCAmelCase_) == str: lowerCAmelCase__ = torch.device(UpperCAmelCase_) lowerCAmelCase__ = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_) lowerCAmelCase__ = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) return latents def __snake_case ( self : Dict): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2') lowerCAmelCase__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowerCAmelCase__ = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) pipe.to(torch_device=UpperCAmelCase_) 
pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def __snake_case ( self : Optional[int]): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2') lowerCAmelCase__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowerCAmelCase__ = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) pipe.to(torch_device=UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = 1 lowerCAmelCase__ = None lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 @require_torch_a def __snake_case ( self : Union[str, Any]): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2') lowerCAmelCase__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowerCAmelCase__ = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) pipe.to(torch_device=UpperCAmelCase_ , torch_dtype=torch.floataa) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_inputs(get_fixed_latents=UpperCAmelCase_ , device=UpperCAmelCase_) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=UpperCAmelCase_ , enable_math=UpperCAmelCase_ , enable_mem_efficient=UpperCAmelCase_): lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @require_torch_a def __snake_case ( self : Optional[int]): '''simple docstring''' lowerCAmelCase__ = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2') lowerCAmelCase__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowerCAmelCase__ = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) pipe.to(torch_device=UpperCAmelCase_ , torch_dtype=torch.floataa) pipe.set_progress_bar_config(disable=UpperCAmelCase_) lowerCAmelCase__ = self.get_inputs(get_fixed_latents=UpperCAmelCase_ , device=UpperCAmelCase_) lowerCAmelCase__ = 1 lowerCAmelCase__ = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=UpperCAmelCase_ , enable_math=UpperCAmelCase_ , enable_mem_efficient=UpperCAmelCase_): lowerCAmelCase__ = pipe(**UpperCAmelCase_).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
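A hedged, de-obfuscated sketch of what the integration tests above exercise; `UNetaDModel` and `floataa` appear to be the dataset's renamings of `UNet2DModel` and `float16`, and the checkpoint plus call arguments are copied from the tests themselves:

import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)

# Multistep sampling with the explicit two-timestep schedule used in the tests.
image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0,
             generator=torch.manual_seed(0), output_type="np").images[0]
print(image.shape)  # (64, 64, 3)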
365
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUs (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase__ = 16 lowerCAmelCase__ = 32 def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 1_6 ): lowerCAmelCase__ = AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCAmelCase__ = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase__ = 8 else: lowerCAmelCase__ = None return tokenizer.pad( lowerCAmelCase__ , padding='longest' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='pt' , ) # Instantiate dataloaders.
lowerCAmelCase__ = DataLoader( tokenized_datasets['train'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) lowerCAmelCase__ = DataLoader( tokenized_datasets['validation'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase__ = mocked_dataloaders # noqa: F811 def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCAmelCase__ ) == "1": lowerCAmelCase__ = 2 # Initialize accelerator lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ = config['lr'] lowerCAmelCase__ = int(config['num_epochs'] ) lowerCAmelCase__ = int(config['seed'] ) lowerCAmelCase__ = int(config['batch_size'] ) lowerCAmelCase__ = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation lowerCAmelCase__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase__ = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase__ ) lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) # Instantiate scheduler lowerCAmelCase__ = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase__ = model(**lowerCAmelCase__ ) lowerCAmelCase__ = outputs.loss lowerCAmelCase__ = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() lowerCAmelCase__ = 0 for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase__ = model(**lowerCAmelCase__ ) lowerCAmelCase__ = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(lowerCAmelCase__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples lowerCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) lowerCAmelCase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) def __lowerCamelCase ( ): lowerCAmelCase__ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose ' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 ' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
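The comment inside the eval loop above notes that the manual last-batch truncation can be replaced by `Accelerator.gather_for_metrics`. A hedged sketch of that variant, assuming the same `accelerator`, `model`, `eval_dataloader`, and `metric` objects from the script:

model.eval()
for batch in eval_dataloader:
    batch.to(accelerator.device)
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # gather_for_metrics drops the duplicate samples that distributed
    # samplers pad onto the final batch, so no manual truncation is needed.
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)
print(metric.compute())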
119
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ): """simple docstring""" warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
61
"""simple docstring""" from __future__ import annotations import math def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = u for i in range(1, __lowerCamelCase ): UpperCAmelCase_ : int = temp * (u - i) return temp def __a ( ): UpperCAmelCase_ : str = int(input("enter the numbers of values: " ) ) UpperCAmelCase_ : list[list[float]] = [] for _ in range(__lowerCamelCase ): y.append([] ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): y[i].append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = 0 print("enter the values of parameters in a list: " ) UpperCAmelCase_ : Union[str, Any] = list(map(__lowerCamelCase, input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(__lowerCamelCase ): UpperCAmelCase_ : int = float(input() ) UpperCAmelCase_ : Tuple = int(input("enter the value to interpolate: " ) ) UpperCAmelCase_ : Tuple = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1, __lowerCamelCase ): for j in range(n - i ): UpperCAmelCase_ : Union[str, Any] = y[j + 1][i - 1] - y[j][i - 1] UpperCAmelCase_ : Optional[int] = y[0][0] for i in range(1, __lowerCamelCase ): summ += (ucal(__lowerCamelCase, __lowerCamelCase ) * y[0][i]) / math.factorial(__lowerCamelCase ) print(f"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
61
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase : List[Any] = { """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""], """tokenization_deberta""": ["""DebertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Tuple = ["""DebertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """DebertaForMaskedLM""", """DebertaForQuestionAnswering""", """DebertaForSequenceClassification""", """DebertaForTokenClassification""", """DebertaModel""", """DebertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDebertaForMaskedLM""", """TFDebertaForQuestionAnswering""", """TFDebertaForSequenceClassification""", """TFDebertaForTokenClassification""", """TFDebertaModel""", """TFDebertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
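A hedged sketch of the `_LazyModule` mechanism the cell above relies on, as it would appear at the bottom of a package `__init__.py`: the module object is swapped for a proxy that imports submodules only on first attribute access (the import structure below is a trimmed assumption):

import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_deberta": ["DebertaConfig"]}
# Replace this module in sys.modules with a lazy proxy; attribute access
# such as `module.DebertaConfig` triggers the real import on demand.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)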
285
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( lowerCAmelCase , unittest.TestCase ): _a : Optional[int]= None _a : Optional[Any]= BloomTokenizerFast _a : Tuple= BloomTokenizerFast _a : str= True _a : Optional[int]= False _a : List[Any]= "tokenizer_file" _a : List[Any]= {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : Optional[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ,**snake_case ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = self.get_rust_tokenizer() lowercase : List[str] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] lowercase : Optional[int] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any = tokenizer.batch_encode_plus(snake_case )["""input_ids"""] self.assertListEqual(snake_case ,snake_case ) lowercase : Optional[int] = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowercase : Dict = self.rust_tokenizer_class.from_pretrained(snake_case ,**snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Dict = """This is a simple input""" lowercase : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase : Dict = ("""This is a simple input""", """This is a pair""") lowercase : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(snake_case ,max_length=snake_case ) tokenizer_r.encode_plus(snake_case ,max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case ) tokenizer_r.encode(snake_case ,max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case ) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""" ) lowercase : Optional[int] = None # Hotfixing padding = None self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Simple input self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Simple input self.assertRaises( snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,) # Pair input self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Pair input self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Pair input self.assertRaises( snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' 
lowercase : List[Any] = self.get_rust_tokenizer() lowercase : List[str] = load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=snake_case ) lowercase : Optional[Any] = next(iter(snake_case ) )["""premise"""] # pick up one sample lowercase : str = list(sample_data.values() ) lowercase : Optional[int] = list(map(tokenizer.encode ,snake_case ) ) lowercase : Dict = [tokenizer.decode(snake_case ,clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
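A hedged sketch of the encode/decode round trip the test above checks, with de-obfuscated names; the checkpoint and the example strings are the ones the test itself uses:

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
ids = tokenizer.batch_encode_plus(texts)["input_ids"]
# Decoding should reproduce the original strings, special tokens included.
assert tokenizer.batch_decode(ids) == texts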
285
1
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , ) -> Optional[int]: lowerCamelCase : int = parent lowerCamelCase : int = 13 lowerCamelCase : str = 7 lowerCamelCase : Any = True lowerCamelCase : Optional[int] = True lowerCamelCase : Dict = True lowerCamelCase : List[Any] = 99 lowerCamelCase : List[Any] = 32 lowerCamelCase : str = 2 lowerCamelCase : Union[str, Any] = 4 lowerCamelCase : str = 37 lowerCamelCase : Any = "gelu" lowerCamelCase : Optional[Any] = 0.1 lowerCamelCase : Dict = 0.1 lowerCamelCase : Optional[Any] = 512 lowerCamelCase : Optional[Any] = 16 lowerCamelCase : List[Any] = 2 lowerCamelCase : int = 0.02 lowerCamelCase : Tuple = 3 lowerCamelCase : Optional[int] = 4 lowerCamelCase : Any = None def _lowercase ( self ) -> List[str]: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Union[str, Any] = None if self.use_input_mask: lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Union[str, Any] = None lowerCamelCase : Tuple = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : Union[str, Any] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self ) -> Dict: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Dict = self.prepare_config_and_inputs() lowerCamelCase : Union[str, Any] = True lowerCamelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : List[Any] = TFEsmModel(config=UpperCamelCase__ ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase : List[Any] = 
model(UpperCamelCase__ ) lowerCamelCase : int = [input_ids, input_mask] lowerCamelCase : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = TFEsmModel(config=UpperCamelCase__ ) lowerCamelCase : str = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } lowerCamelCase : List[str] = model(UpperCamelCase__ ) lowerCamelCase : Tuple = [input_ids, input_mask] lowerCamelCase : Dict = model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ) # Also check the case where encoder outputs are not passed lowerCamelCase : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : List[str] = TFEsmForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase : Any = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : List[Any] = self.num_labels lowerCamelCase : Dict = TFEsmForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase : Dict = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) lowerCamelCase_ : Optional[Any] = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase_ : Any = False lowerCamelCase_ : Dict = False def _lowercase ( self ) -> Any: lowerCamelCase : Tuple = TFEsmModelTester(self ) lowerCamelCase : int = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> List[str]: self.config_tester.run_common_tests() def _lowercase ( self ) -> str: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( 
self ) -> List[str]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ ) def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self ) -> List[Any]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFEsmModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip("Protein models do not support embedding resizing." ) def _lowercase ( self ) -> List[str]: pass @unittest.skip("Protein models do not support embedding resizing." ) def _lowercase ( self ) -> Optional[Any]: pass def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase : Any = model.get_bias() assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) for k, v in name.items(): assert isinstance(UpperCamelCase__ , tf.Variable ) else: lowerCamelCase : str = model.get_output_embeddings() assert x is None lowerCamelCase : Optional[Any] = model.get_bias() assert name is None @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Dict: lowerCamelCase : int = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0] lowerCamelCase : Union[str, Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ ) # compare the actual values for a slice. lowerCamelCase : List[str] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def _lowercase ( self ) -> str: lowerCamelCase : Dict = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Dict = model(UpperCamelCase__ )[0] # compare the actual values for a slice. lowerCamelCase : int = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
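A hedged, de-obfuscated sketch of the slow integration check at the end of the cell above, running `TFEsmForMaskedLM` on the 8M ESM-2 checkpoint named in the test:

import tensorflow as tf
from transformers import TFEsmForMaskedLM

model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6, 33): sequence length 6 over a 33-token vocabulary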
48
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __lowerCamelCase (_a ): _lowercase = 0 _lowercase = False _lowercase = 3.0 class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Any ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs(),{} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} ) self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} ) @require_cuda def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 ) AcceleratorState._reset_state() __UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __UpperCamelCase = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale,1_0_2_4.0 ) self.assertEqual(scaler._growth_factor,2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor,0.5 ) self.assertEqual(scaler._growth_interval,2000 ) self.assertEqual(scaler._enabled,A_ ) @require_multi_gpu def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(A_,env=os.environ.copy() ) if __name__ == "__main__": __snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True) __snake_case = Accelerator(kwargs_handlers=[ddp_scaler]) __snake_case = torch.nn.Linear(1_0_0, 2_0_0) __snake_case = accelerator.prepare(model) # Check the values changed in kwargs __snake_case = '''''' __snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4) if observed_bucket_cap_map != 1_5: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
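A hedged sketch of the `KwargsHandler` dataclass behaviour the first test above asserts: `to_kwargs()` returns only the fields that differ from their defaults:

from dataclasses import dataclass
from accelerate.utils import KwargsHandler

@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0

print(MockClass().to_kwargs())             # {}
print(MockClass(a=2, b=True).to_kwargs())  # {'a': 2, 'b': True}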
310
0
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training") # TF training parameters UpperCamelCase_ = False UpperCamelCase_ = False def A ( __UpperCAmelCase ) -> Any: '''simple docstring''' return TrainCommand(__UpperCAmelCase ) class a_ ( _snake_case ): @staticmethod def __a ( _lowercase :ArgumentParser) -> List[Any]: UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''') train_parser.add_argument( '''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''') train_parser.add_argument( '''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''') train_parser.add_argument( '''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''') train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''') train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''') train_parser.add_argument( '''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to save the trained model.''') train_parser.add_argument( '''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''') train_parser.add_argument( '''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''') train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''') train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''') train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''') train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''') train_parser.set_defaults(func=_lowercase) def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]: UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''') UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=_lowercase) UpperCAmelCase_ = args.output UpperCAmelCase_ = args.column_label UpperCAmelCase_ = args.column_text UpperCAmelCase_ = args.column_id self.logger.info(f"Loading {args.task} pipeline for {args.model}") if args.task == "text_classification": UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}") UpperCAmelCase_ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) UpperCAmelCase_ = None if args.validation_data: self.logger.info(f"Loading validation dataset from {args.validation_data}") UpperCAmelCase_ = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) UpperCAmelCase_ = args.validation_split UpperCAmelCase_ = args.train_batch_size UpperCAmelCase_ = args.valid_batch_size UpperCAmelCase_ = args.learning_rate UpperCAmelCase_ = args.adam_epsilon def __a ( self :int) -> Tuple: if self.framework == "tf": return self.run_tf() return self.run_torch() def __a ( self :Optional[Any]) -> Any: raise NotImplementedError def __a ( self :int) -> Optional[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output)
344
from ..utils import DummyObject, requires_backends class a_ ( metaclass=_snake_case ): UpperCamelCase__ : Any =["torch", "scipy"] def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]: requires_backends(self , ['''torch''', '''scipy''']) @classmethod def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]: requires_backends(cls , ['''torch''', '''scipy''']) @classmethod def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]: requires_backends(cls , ['''torch''', '''scipy'''])
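A hedged sketch of the dummy-object pattern in the cell above: a placeholder class that raises a helpful error whenever an optional backend is missing. The class name is an illustrative assumption:

from transformers.utils import DummyObject, requires_backends

class ScipyDependentThing(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError explaining how to install torch and scipy
        # if either backend is unavailable.
        requires_backends(self, ["torch", "scipy"])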
344
1
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = (UnCLIPScheduler,) def __a ( self : Optional[int] , **_A : str ) -> List[str]: """simple docstring""" lowercase : List[Any] = { '''num_train_timesteps''': 1_000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**_A ) return config def __a ( self : int ) -> Tuple: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=_A ) def __a ( self : Union[str, Any] ) -> int: """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_A ) def __a ( self : Dict ) -> Tuple: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __a ( self : int ) -> Dict: """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_A ) def __a ( self : str ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_A ) def __a ( self : int ) -> Dict: """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_A , prev_timestep=_A ) def __a ( self : Any ) -> Optional[int]: """simple docstring""" lowercase : str = self.scheduler_classes[0] lowercase : str = self.get_scheduler_config(variance_type='''fixed_small_log''' ) lowercase : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def __a ( self : Any ) -> int: """simple docstring""" lowercase : str = self.scheduler_classes[0] lowercase : int = self.get_scheduler_config(variance_type='''learned_range''' ) lowercase : Tuple = scheduler_class(**_A ) lowercase : List[str] = 0.5 assert scheduler._get_variance(1 , predicted_variance=_A ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=_A ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=_A ) - -0.0_010_011 < 1E-5 def __a ( self : List[str] ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.scheduler_classes[0] lowercase : int = self.get_scheduler_config() lowercase : List[str] = scheduler_class(**_A ) lowercase : Union[str, Any] = scheduler.timesteps lowercase : Any = self.dummy_model() lowercase : Dict = self.dummy_sample_deter lowercase : Optional[Any] = torch.manual_seed(0 ) for i, t in enumerate(_A ): # 1. predict noise residual lowercase : Optional[int] = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 lowercase : int = scheduler.step(_A , _A , _A , generator=_A ).prev_sample lowercase : Any = pred_prev_sample lowercase : Dict = torch.sum(torch.abs(_A ) ) lowercase : Union[str, Any] = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def __a ( self : Dict ) -> Dict: """simple docstring""" lowercase : Any = self.scheduler_classes[0] lowercase : List[Any] = self.get_scheduler_config() lowercase : Optional[Any] = scheduler_class(**_A ) scheduler.set_timesteps(25 ) lowercase : Any = scheduler.timesteps lowercase : int = self.dummy_model() lowercase : Union[str, Any] = self.dummy_sample_deter lowercase : str = torch.manual_seed(0 ) for i, t in enumerate(_A ): # 1. predict noise residual lowercase : Any = model(_A , _A ) if i + 1 == timesteps.shape[0]: lowercase : Union[str, Any] = None else: lowercase : List[str] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowercase : Union[str, Any] = scheduler.step( _A , _A , _A , prev_timestep=_A , generator=_A ).prev_sample lowercase : Union[str, Any] = pred_prev_sample lowercase : List[str] = torch.sum(torch.abs(_A ) ) lowercase : Optional[Any] = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" pass def __a ( self : int ) -> List[str]: """simple docstring""" pass
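A hedged, de-obfuscated sketch of the denoising loop the full-loop tests above exercise, with a zero "model output" standing in for the real UNet and default values assumed for the remaining scheduler config fields:

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    # step() returns the predicted previous sample x_{t-1}
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample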
308
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
308
1
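The tester above follows the stock `transformers` pattern: build a tiny randomly initialized config, run each task head once, and assert output shapes. A minimal standalone sketch of the same idea using only the public API; the config values are arbitrary tiny test sizes, not anything taken from the file above.

# Minimal shape-check sketch in the spirit of the XLM tester above.
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
model = XLMModel(config)
model.eval()

# avoid the pad id (2) so computed lengths match the full sequence
input_ids = torch.randint(3, config.vocab_size, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (2, 7, config.emb_dim)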
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem, reported in MiB
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem, reported in MiB
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
365
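A usage sketch for the measurement helpers above: take a snapshot, run the workload, and log the deltas (values come back in MiB, per the 2**20 divisions).

# Hypothetical benchmark run built on the helpers above.
import torch

start = start_measure()                 # time, CPU RSS, per-GPU allocated bytes
x = torch.randn(1024, 1024)
y = x @ x                               # the workload being measured
end = end_measure(start)                # deltas in MiB, plus CPU/GPU peaks
log_measures(end, "1024x1024 matmul")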
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int: '''simple docstring''' UpperCAmelCase_ = False UpperCAmelCase_ = False if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ): UpperCAmelCase_ = True elif "IPython" in sys.modules: UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() ) try: UpperCAmelCase_ = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ''' '''your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if num_processes is None: UpperCAmelCase_ = 8 UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on one CPU.''' ) function(*__UpperCAmelCase ) else: if num_processes is None: raise ValueError( '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ''' '''inside your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if torch.cuda.is_initialized(): raise ValueError( '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction ''' '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ''' '''function.''' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__UpperCAmelCase , master_addr='''127.0.01''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ): UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' ) print(f"Launching training on {num_processes} GPUs." ) try: start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ''' '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
''' '''Please review your imports and test them when running the `notebook_launcher()` to identify ''' '''which one is problematic.''' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): UpperCAmelCase_ = '''1''' print('''Launching training on MPS.''' ) elif torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on CPU.''' ) function(*__UpperCAmelCase ) def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]: '''simple docstring''' from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__UpperCAmelCase , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ): UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase ) start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
344
0
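The launcher above corresponds to accelerate's public `notebook_launcher` (with `debug_launcher` below it). A sketch of typical use from a Colab or Jupyter cell; the training function and its arguments are placeholders.

# Sketch of driving the launcher above from a notebook cell.
from accelerate import notebook_launcher

def training_function(learning_rate, num_epochs):
    # a real Accelerator-based training loop would go here
    print(f"training with lr={learning_rate} for {num_epochs} epochs")

# Spawns 2 GPU workers via fork; CUDA must not already be initialized,
# as the checks in the launcher above enforce.
notebook_launcher(training_function, args=(3e-4, 2), num_processes=2)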
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def A_ ( A__ ) -> Any: return EnvironmentCommand() def A_ ( A__ ) -> str: return EnvironmentCommand(args.accelerate_config_file ) class A__ ( __UpperCAmelCase ): """simple docstring""" @staticmethod def __lowercase ( lowercase) -> Optional[int]: '''simple docstring''' a__ : Optional[Any] = parser.add_parser('env') download_parser.set_defaults(func=lowercase) download_parser.add_argument( '--accelerate-config_file' , default=lowercase , help='The accelerate config file to use for the default values in the launching script.' , ) download_parser.set_defaults(func=lowercase) def __init__( self , lowercase , *lowercase) -> None: '''simple docstring''' a__ : Optional[int] = accelerate_config_file def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : List[Any] = 'not installed' if is_safetensors_available(): import safetensors a__ : Optional[int] = safetensors.__version__ elif importlib.util.find_spec('safetensors') is not None: import safetensors a__ : Optional[Any] = F'{safetensors.__version__} but is ignored because of PyTorch version too old.' a__ : Any = 'not installed' a__ : Optional[int] = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file a__ : Tuple = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowercase): a__ : Dict = load_config_from_file(self._accelerate_config_file).to_dict() a__ : int = ( '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()]) if isinstance(lowercase , lowercase) else F'\t{accelerate_config}' ) a__ : List[Any] = 'not installed' a__ : Optional[Any] = 'NA' if is_torch_available(): import torch a__ : Union[str, Any] = torch.__version__ a__ : Optional[int] = torch.cuda.is_available() a__ : Any = 'not installed' a__ : Dict = 'NA' if is_tf_available(): import tensorflow as tf a__ : Optional[int] = tf.__version__ try: # deprecated in v2.1 a__ : str = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool a__ : str = bool(tf.config.list_physical_devices('GPU')) a__ : List[str] = 'not installed' a__ : Optional[Any] = 'not installed' a__ : Optional[Any] = 'not installed' a__ : Tuple = 'NA' if is_flax_available(): import flax import jax import jaxlib a__ : Any = flax.__version__ a__ : Union[str, Any] = jax.__version__ a__ : List[Any] = jaxlib.__version__ a__ : Dict = jax.lib.xla_bridge.get_backend().platform a__ : List[str] = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'{safetensors_version}', 'Accelerate version': F'{accelerate_version}', 'Accelerate config': F'{accelerate_config_str}', 'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})', 'Tensorflow version (GPU?)': F'{tf_version} ({tf_cuda_available})', 'Flax version (CPU?/GPU?/TPU?)': F'{flax_version} ({jax_backend})', 'Jax version': F'{jax_version}', 'JaxLib version': F'{jaxlib_version}', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } 
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n') print(self.format_dict(lowercase)) return info @staticmethod def __lowercase ( lowercase) -> Optional[Any]: '''simple docstring''' return "\n".join([F'- {prop}: {val}' for prop, val in d.items()]) + "\n"
99
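The command above backs the `transformers-cli env` subcommand, and it can also be driven programmatically. A short sketch; the import path is the real one, but `run()` as the name of the obfuscated reporting method is an assumption based on the upstream file this sample mirrors.

# Programmatic use of the environment report above (method name `run` assumed).
from transformers.commands.env import EnvironmentCommand

cmd = EnvironmentCommand(accelerate_config_file=None)
info = cmd.run()  # prints the copy-paste block and returns the info dict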
def stooge_sort(arr: list[int]) -> list[int]:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list[int], i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
99
1
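Two properties of stooge sort worth checking: correctness on small inputs, and its running time, which the recurrence T(n) = 3T(2n/3) + O(1) puts at O(n^(log 3 / log 1.5)), about O(n^2.71), i.e. worse than bubble sort.

# Sanity checks for stooge_sort above.
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []
assert stooge_sort([7]) == [7]
assert stooge_sort([3, 3, 1]) == [1, 3, 3]  # duplicates are handled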
def move_tower(height, from_pole, to_pole, with_pole):
    # Move the top n-1 disks out of the way, move the largest disk,
    # then move the n-1 disks back on top of it.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
355
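The solver above makes the minimum possible number of moves, 2**height - 1. A counting variant that verifies this without printing:

# Counting variant of the recursion above.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    # move n-1 disks aside, move the largest, move n-1 disks back on top
    return 2 * count_moves(height - 1) + 1

for n in range(1, 6):
    assert count_moves(n) == 2**n - 1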
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
290
0
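The deberta `__init__.py` above, and the wavlm and bigbird_pegasus ones later in this dump, all follow the same recipe: declare an import map, append optional-backend entries behind availability checks, and hand the map to `_LazyModule` so heavy imports happen only on first attribute access. A toy illustration of that mechanism; it is simplified, and the real `_LazyModule` also handles `TYPE_CHECKING`, submodules, and module specs.

# Toy version of the lazy-module trick used above: attributes resolve on first
# access instead of at import time. Illustrative only, not the real _LazyModule.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value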
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
164
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowerCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def A__ ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ = CLIPTextModel(lowerCamelCase__ ) lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase__ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def A__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[Any]: '''simple docstring''' lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" ) if str(lowerCamelCase__ ).startswith("""mps""" ): lowercase__ = torch.manual_seed(lowerCamelCase__ ) else: lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) lowercase__ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def A__ ( self ) -> str: '''simple docstring''' lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator 
lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ ) lowercase__ = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase__ ) lowercase__ = sd_pipe(**lowerCamelCase__ ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def A__ ( self ) -> Dict: '''simple docstring''' lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ ) lowercase__ = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase__ ) lowercase__ = """french fries""" lowercase__ = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ ) lowercase__ = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase__ ) lowercase__ = [inputs["""prompt"""]] * 2 lowercase__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0 lowercase__ = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ ) lowercase__ = image / 2 + 0.5 lowercase__ = image.permute(0 , 3 , 1 , 2 ) lowercase__ = image.repeat(2 , 1 , 1 , 1 ) lowercase__ = sd_pipe(**lowerCamelCase__ ).images lowercase__ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowercase__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def A__ ( self ) -> Any: '''simple docstring''' lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" ) lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ ) lowercase__ = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase__ ) lowercase__ = sd_pipe(**lowerCamelCase__ ).images lowercase__ = image[0, -3:, -3:, -1] lowercase__ = [round(lowerCamelCase__ , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(lowerCamelCase__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def A__ ( self ) -> Optional[Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def A__ ( self ) -> Tuple: '''simple 
docstring''' lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ ) lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ ) lowercase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0] lowercase__ = components["""vae"""] lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode() lowercase__ = pipe(**lowerCamelCase__ )[0] lowercase__ = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class A ( unittest.TestCase ): def A__ ( self ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self , lowerCamelCase__=0 ) -> int: '''simple docstring''' lowercase__ = torch.manual_seed(lowerCamelCase__ ) lowercase__ = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) lowercase__ = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def A__ ( self ) -> str: '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**lowerCamelCase__ ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def A__ ( self ) -> List[Any]: '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ ) lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**lowerCamelCase__ ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def A__ ( self ) -> str: '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ ) lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**lowerCamelCase__ ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 
0.38_47, 0.37_53] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def A__ ( self ) -> int: '''simple docstring''' lowercase__ = 0 def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None: lowercase__ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__ = False lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa ) lowercase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def A__ ( self ) -> Tuple: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa ) lowercase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__ = self.get_inputs() lowercase__ = pipe(**lowerCamelCase__ ) lowercase__ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowercase__ = inputs["""image"""].resize((504, 504) ) lowercase__ = """timbrooks/instruct-pix2pix""" lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCamelCase__ , safety_checker=lowerCamelCase__ , ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() lowercase__ = pipe(**lowerCamelCase__ ) lowercase__ = output.images[0] lowercase__ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowercase__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
164
1
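Stripped of the assertions, the slow tests above reduce to one inference recipe. A sketch with the public diffusers API follows; upstream the pipeline class is spelled `StableDiffusionInstructPix2PixPipeline` (the dump above renders the `2` as `a`). It requires a GPU and downloads the `timbrooks/instruct-pix2pix` weights.

# Plain inference version of what the slow tests above exercise.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.0,
).images[0]
edited.save("cyborg.png")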
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list[list[int]]: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =0 _lowerCAmelCase =sum(__UpperCamelCase ) create_state_space_tree(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return result def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> None: if sum(__UpperCamelCase ) > max_sum or (remaining_nums_sum + sum(__UpperCamelCase )) < max_sum: return if sum(__UpperCamelCase ) == max_sum: result.append(__UpperCamelCase ) return for index in range(__UpperCamelCase , len(__UpperCamelCase ) ): create_state_space_tree( __UpperCamelCase , __UpperCamelCase , index + 1 , [*path, nums[index]] , __UpperCamelCase , remaining_nums_sum - nums[index] , ) __A = [3, 34, 4, 12, 5, 2] __A = 9 __A = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
341
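A worked example for the generator above, using the module's own inputs: among the index-ordered subsets of [3, 34, 4, 12, 5, 2], exactly two sum to 9, and depth-first order finds [3, 4, 2] before [4, 5].

# Expected output of the subset-sum search above for the module's inputs.
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]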
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: 
if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
1
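A usage sketch for the tokenizer above. Upstream the class is published as `BartphoTokenizer`, and the checkpoint named in the file's constants is `vinai/bartpho-syllable`; `sentencepiece` must be installed.

# Round-trip sketch for the BARTpho tokenizer above.
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
enc = tokenizer("Chúng tôi là những nghiên cứu viên xử lý ngôn ngữ tự nhiên.")
print(enc["input_ids"])                    # wrapped in <s> ... </s>
print(tokenizer.decode(enc["input_ids"]))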
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __SCREAMING_SNAKE_CASE =pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any ): inspect_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = path + '.py' assert script_name in os.listdir(__SCREAMING_SNAKE_CASE ) assert "__pycache__" not in os.listdir(__SCREAMING_SNAKE_CASE ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ): inspect_metric(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : str = path + '.py' assert script_name in os.listdir(__SCREAMING_SNAKE_CASE ) assert "__pycache__" not in os.listdir(__SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Tuple = get_dataset_config_info(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ): with pytest.raises(__SCREAMING_SNAKE_CASE ): get_dataset_config_info(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Tuple = get_dataset_config_names(__SCREAMING_SNAKE_CASE ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : Optional[int] = get_dataset_infos(__SCREAMING_SNAKE_CASE ) assert list(infos.keys() ) == expected_configs lowercase_ : Optional[int] = expected_configs[0] assert expected_config in infos lowercase_ : Dict = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , 
__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : int = get_dataset_infos(__SCREAMING_SNAKE_CASE ) assert expected_config in infos lowercase_ : Optional[int] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ): with pytest.raises(__SCREAMING_SNAKE_CASE ): get_dataset_split_names(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
213
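The parametrized cases above translate directly to interactive calls. A sketch with the public `datasets` inspection API; network access is required, and the expected lists are the ones the tests assert.

# Interactive equivalents of the assertions above.
from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("paws"))
# ['labeled_final', 'labeled_swap', 'unlabeled_final']
print(get_dataset_split_names("paws", config_name="labeled_final"))
# ['train', 'test', 'validation']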
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
0
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig a : List[Any] = logging.get_logger(__name__) # General docstring a : str = """PoolFormerConfig""" # Base docstring a : Optional[int] = """sail/poolformer_s12""" a : Any = [1, 512, 7, 7] # Image classification docstring a : int = """sail/poolformer_s12""" a : int = """tabby, tabby cat""" a : Union[str, Any] = [ """sail/poolformer_s12""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowercase__(A , A = 0.0 , A = False ) ->Any: """simple docstring""" if drop_prob == 0.0 or not training: return input lowercase__ : Union[str, Any]= 1 - drop_prob lowercase__ : Optional[int]= (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase__ : Union[str, Any]= keep_prob + torch.rand(A , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowercase__ : Any= input.div(A ) * random_tensor return output class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ = None ): '''simple docstring''' super().__init__() lowercase__ : Union[str, Any]= drop_prob def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return drop_path(snake_case__ , self.drop_prob , self.training ) def UpperCAmelCase_ ( self ): '''simple docstring''' return "p={}".format(self.drop_prob ) class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' super().__init__() lowercase__ : str= patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase__ : List[Any]= stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase__ : str= padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase__ : List[Any]= nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase__ : Tuple= norm_layer(snake_case__ ) if norm_layer else nn.Identity() def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= self.projection(snake_case__ ) lowercase__ : Dict= self.norm(snake_case__ ) return embeddings class __UpperCAmelCase( nn.GroupNorm ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(1 , snake_case__ , **snake_case__ ) class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__() lowercase__ : int= nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.pool(snake_case__ ) - hidden_states class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): 
'''simple docstring''' super().__init__() lowercase__ : int= nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase__ : List[str]= nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase__ : List[str]= PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase__ : Optional[Any]= ACTaFN[config.hidden_act] else: lowercase__ : str= config.hidden_act def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Any= self.conva(snake_case__ ) lowercase__ : Any= self.act_fn(snake_case__ ) lowercase__ : List[Any]= self.drop(snake_case__ ) lowercase__ : Optional[Any]= self.conva(snake_case__ ) lowercase__ : int= self.drop(snake_case__ ) return hidden_states class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() lowercase__ : Dict= PoolFormerPooling(snake_case__ ) lowercase__ : Any= PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase__ : List[str]= PoolFormerGroupNorm(snake_case__ ) lowercase__ : Dict= PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase__ : List[Any]= PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase__ : Dict= config.use_layer_scale if config.use_layer_scale: lowercase__ : Optional[Any]= nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase__ : List[Any]= nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if self.use_layer_scale: lowercase__ : Any= self.pooling(self.before_norm(snake_case__ ) ) lowercase__ : Dict= self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase__ : Any= hidden_states + self.drop_path(snake_case__ ) lowercase__ : List[Any]= () lowercase__ : Union[str, Any]= self.output(self.after_norm(snake_case__ ) ) lowercase__ : List[str]= self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase__ : int= hidden_states + self.drop_path(snake_case__ ) lowercase__ : str= (output,) + outputs return outputs else: lowercase__ : str= self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase__ : Optional[Any]= pooling_output + hidden_states lowercase__ : str= () # Second residual connection inside the PoolFormerOutput block lowercase__ : Dict= self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase__ : Any= hidden_states + layer_output lowercase__ : Union[str, Any]= (output,) + outputs return outputs class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__() lowercase__ : Tuple= config # stochastic depth decay rule lowercase__ : Optional[int]= [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase__ : Tuple= [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase__ : int= nn.ModuleList(snake_case__ ) # Transformer blocks lowercase__ : int= [] lowercase__ : Dict= 0 for 
i in range(config.num_encoder_blocks ): # each block consists of layers lowercase__ : List[Any]= [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) lowercase__ : Tuple= nn.ModuleList(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False , snake_case__=True ): '''simple docstring''' lowercase__ : int= () if output_hidden_states else None lowercase__ : Tuple= pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase__ : int= layers # Get patch embeddings from hidden_states lowercase__ : Tuple= embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase__ : Tuple= blk(snake_case__ ) lowercase__ : str= layer_outputs[0] if output_hidden_states: lowercase__ : Optional[Any]= all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = PoolFormerConfig __lowerCamelCase = "poolformer" __lowerCamelCase = "pixel_values" __lowerCamelCase = True def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): lowercase__ : str= value a : int = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ a : Optional[int] = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. """ @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, SCREAMING_SNAKE_CASE__ , ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ ) lowercase__ : List[str]= config lowercase__ : Union[str, Any]= PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def UpperCAmelCase_ ( self ): '''simple docstring''' return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' lowercase__ : Union[str, Any]= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Union[str, Any]= return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) lowercase__ : Any= self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase__ : List[Any]= encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__() lowercase__ : str= nn.Linear(config.hidden_size , config.hidden_size ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= self.dense(snake_case__ ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , SCREAMING_SNAKE_CASE__ , ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ ) lowercase__ : List[str]= config.num_labels lowercase__ : int= PoolFormerModel(snake_case__ ) # Final norm lowercase__ : Union[str, Any]= PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase__ : Tuple= ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' lowercase__ : List[Any]= return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : List[str]= self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase__ : Dict= outputs[0] lowercase__ : List[str]= self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase__ : Dict= None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase__ : List[Any]= "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase__ : Optional[int]= "single_label_classification" else: lowercase__ : Optional[int]= 
"multi_label_classification" if self.config.problem_type == "regression": lowercase__ : Optional[int]= MSELoss() if self.num_labels == 1: lowercase__ : Tuple= loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase__ : Optional[Any]= loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase__ : str= CrossEntropyLoss() lowercase__ : Optional[int]= loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase__ : Optional[int]= BCEWithLogitsLoss() lowercase__ : int= loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase__ : Optional[Any]= (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : List[str] = { """configuration_xlm_roberta_xl""": [ """XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaXLConfig""", """XLMRobertaXLOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = [ """XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaXLForCausalLM""", """XLMRobertaXLForMaskedLM""", """XLMRobertaXLForMultipleChoice""", """XLMRobertaXLForQuestionAnswering""", """XLMRobertaXLForSequenceClassification""", """XLMRobertaXLForTokenClassification""", """XLMRobertaXLModel""", """XLMRobertaXLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports __A = "\nimport os\n" __A = "\ndef foo():\n import os\n return False\n" __A = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" __A = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" __A = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" __A = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" __A = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" __A = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" __A = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" __A = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" __A = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("case" , __SCREAMING_SNAKE_CASE ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: __lowerCAmelCase: Any = os.path.join(__SCREAMING_SNAKE_CASE , "test_file.py" ) with open(__SCREAMING_SNAKE_CASE , "w" ) as _tmp_file: _tmp_file.write(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[Any] = get_imports(__SCREAMING_SNAKE_CASE ) assert parsed_imports == ["os"]
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, "tokenizer_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json", }, } __A = { "google/rembert": 256, } __A = "▁" class snake_case ( __snake_case ): SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Dict = RemBertTokenizer def __init__( self : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int="[CLS]" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Dict="[SEP]" , UpperCamelCase__ : int="<pad>" , UpperCamelCase__ : Any="[CLS]" , UpperCamelCase__ : str="[MASK]" , **UpperCamelCase__ : Optional[Any] , )-> List[Any]: '''simple docstring''' __lowerCAmelCase: int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) __lowerCAmelCase: Optional[int] = do_lower_case __lowerCAmelCase: int = remove_space __lowerCAmelCase: int = keep_accents __lowerCAmelCase: str = vocab_file __lowerCAmelCase: Tuple = False if not self.vocab_file else True def lowercase_ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]: '''simple docstring''' __lowerCAmelCase: Optional[int] = [self.sep_token_id] __lowerCAmelCase: Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False)-> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model.") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase__)) + [1] + ([0] * len(UpperCamelCase__)) + [1] return [1] + ([0] * len(UpperCamelCase__)) + [1] def lowercase_ ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]: '''simple docstring''' __lowerCAmelCase: Optional[int] = [self.sep_token_id] __lowerCAmelCase: 
Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCamelCase__): logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__)) return __lowerCAmelCase: Optional[Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__): copyfile(self.vocab_file , UpperCamelCase__) return (out_vocab_file,)
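# Usage sketch for the fast RemBERT tokenizer above, using the `google/rembert`
# checkpoint already referenced in its pretrained maps:
from transformers import RemBertTokenizerFast

tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
encoded = tokenizer("Hello world", "A second segment")
print(encoded["input_ids"])       # [CLS] seq1 [SEP] seq2 [SEP] layout built above
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second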
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a circular array."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Peek at the front element; returns False when the queue is empty.
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
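# Usage sketch for the queue above; enqueue() returns self, so calls chain.
queue = CircularQueue(3)
queue.enqueue(1).enqueue(2).enqueue(3)
print(len(queue), queue.first())         # 3 1
print(queue.dequeue())                   # 1
queue.enqueue(4)                         # rear wraps around the fixed array
print(queue.dequeue(), queue.dequeue())  # 2 3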
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A ( __UpperCAmelCase ): __snake_case = ['image_processor', 'tokenizer'] __snake_case = 'OwlViTImageProcessor' __snake_case = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', UpperCamelCase__, ) lowerCAmelCase_ = kwargs.pop('''feature_extractor''' ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(UpperCamelCase__, UpperCamelCase__ ) def __call__( self, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__="max_length", UpperCamelCase__="np", **UpperCamelCase__ ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(UpperCamelCase__, UpperCamelCase__ ) or (isinstance(UpperCamelCase__, UpperCamelCase__ ) and not isinstance(text[0], UpperCamelCase__ )): lowerCAmelCase_ = [self.tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ )] elif isinstance(UpperCamelCase__, UpperCamelCase__ ) and isinstance(text[0], UpperCamelCase__ ): lowerCAmelCase_ = [] # Maximum number of queries across batch lowerCAmelCase_ = max([len(UpperCamelCase__ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCamelCase__ ) != max_num_queries: lowerCAmelCase_ = t + [''' '''] * (max_num_queries - len(UpperCamelCase__ )) lowerCAmelCase_ = self.tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ ) encodings.append(UpperCamelCase__ ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": lowerCAmelCase_ = np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 ) lowerCAmelCase_ = np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCAmelCase_ = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 ) lowerCAmelCase_ = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCAmelCase_ = torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0 ) lowerCAmelCase_ = torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowerCAmelCase_ = tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0 ) lowerCAmelCase_ = tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0 ) else: raise ValueError('''Target return tensor type could not be 
returned''' ) lowerCAmelCase_ = BatchEncoding() lowerCAmelCase_ = input_ids lowerCAmelCase_ = attention_mask if query_images is not None: lowerCAmelCase_ = BatchEncoding() lowerCAmelCase_ = self.image_processor( UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ ).pixel_values lowerCAmelCase_ = query_pixel_values if images is not None: lowerCAmelCase_ = self.image_processor(UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ ) if text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase__ ), tensor_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.image_processor.post_process(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.image_processor.post_process_object_detection(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', UpperCamelCase__, ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', UpperCamelCase__, ) return self.image_processor
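# Zero-shot detection sketch with the processor above. The checkpoint name
# `google/owlvit-base-patch32` and the image path are assumptions for
# illustration; post_process_object_detection is the method forwarded above.
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open("street.png")  # placeholder path
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)
print(results[0]["scores"], results[0]["labels"], results[0]["boxes"].shape)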
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowercase : def __init__( self , snake_case , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , snake_case=3 , snake_case=None , snake_case=2 , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = scope snake_case_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) snake_case_ = (image_size // patch_size) ** 2 snake_case_ = num_patches + 2 def a ( self ): snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = self.get_config() return config, pixel_values, labels def a ( self ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = DeiTModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = DeiTForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ = 1 snake_case_ = 
DeiTForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ = model(snake_case ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = self.type_sequence_label_size snake_case_ = DeiTForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ = 1 snake_case_ = DeiTForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a ( self ): snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( lowercase_ , lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Optional[int] = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : List[Any] = False def a ( self ): snake_case_ = DeiTModelTester(self ) snake_case_ = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def a ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def a ( self ): pass def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(snake_case ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def a ( self , snake_case , snake_case , snake_case=False ): snake_case_ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class.__name__ == 
"DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def a ( self ): if not self.model_tester.is_training: return snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(snake_case ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue snake_case_ = model_class(snake_case ) model.to(snake_case ) model.train() snake_case_ = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ = model(**snake_case ).loss loss.backward() def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return snake_case_ = False snake_case_ = True for model_class in self.all_model_classes: if model_class in get_values(snake_case ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue snake_case_ = model_class(snake_case ) model.gradient_checkpointing_enable() model.to(snake_case ) model.train() snake_case_ = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ = model(**snake_case ).loss loss.backward() def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(snake_case ), *get_values(snake_case ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): snake_case_ = problem_type['title'] snake_case_ = problem_type['num_labels'] snake_case_ = model_class(snake_case ) model.to(snake_case ) model.train() snake_case_ = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if problem_type["num_labels"] > 1: snake_case_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) snake_case_ = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=snake_case ) as warning_list: snake_case_ = model(**snake_case ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def a ( self ): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = DeiTModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def a ( self ): return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def a ( self ): snake_case_ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( snake_case ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ = model(**snake_case ) # verify the logits snake_case_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def a ( self ): snake_case_ = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=snake_case , return_tensors='pt' ) snake_case_ = inputs.pixel_values.to(snake_case ) # forward pass to make sure inference works in fp16 with torch.no_grad(): snake_case_ = model(snake_case )
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Interface every CLI subcommand implements: register a subparser, then run."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
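# A hedged example of the contract: a concrete command registers a subparser
# and implements run(). `EnvCommand` here is illustrative, not the real
# transformers-cli implementation.
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the object returned by ArgumentParser.add_subparsers()
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        import platform
        import sys

        print(f"python {sys.version.split()[0]} on {platform.platform()}")


if __name__ == "__main__":
    root = ArgumentParser("demo-cli")
    EnvCommand.register_subcommand(root.add_subparsers())
    args = root.parse_args(["env"])
    args.func(args).run()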
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , 
**lowerCAmelCase_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] )
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> List[str]: super().setUp() _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '<unk>'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def lowerCAmelCase ( self ) -> Union[str, Any]: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertNotIn('labels' , lowerCAmelCase_ ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , 
return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.'] _snake_case = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' ) _snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' ) _snake_case = inputs['input_ids'] _snake_case = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = ['Summary of the text.', 'Another summary.'] _snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']] _snake_case = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Tuple: pass def lowerCAmelCase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = 'A, <mask> AllenNLP sentence.' _snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
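# Sketch of the global-attention convention the pad() test above exercises:
# LED expects a global_attention_mask next to the usual inputs, typically with
# the first (<s>) token marked global.
import torch
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = tokenizer(["A long document to summarize."], return_tensors="pt")
batch["global_attention_mask"] = torch.zeros_like(batch["input_ids"])
batch["global_attention_mask"][:, 0] = 1  # global attention on <s> only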
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so update input_schema through __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
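# Hedged usage sketch: Dataset.prepare_for_task() consumed templates like the
# one above in older `datasets` releases (the API was later deprecated), and
# the dataset id below is hypothetical.
from datasets import load_dataset

ds = load_dataset("user/some-asr-dataset", split="train")  # hypothetical id
ds = ds.prepare_for_task("automatic-speech-recognition")
print(ds.features)  # now cast to {"audio": Audio(), "transcription": Value("string")}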
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into cipher values plus the random key that produced them."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k  # c = i*k + k**2
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: i = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __snake_case : str = logging.get_logger(__name__) __snake_case : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp __snake_case : Tuple = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } __snake_case : int = { 'RUCAIBox/mvp': 1024, } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ['input_ids', 'attention_mask'] __snake_case = MvpTokenizer def __init__( self : List[str] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : Dict =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space: A__ : Optional[int] =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) ) A__ : str =add_prefix_space A__ : List[str] =pre_tok_class(**lowerCAmelCase_ ) A__ : Union[str, Any] =add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A__ : Tuple ="""post_processor""" A__ : Optional[int] =getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) if tokenizer_component_instance: A__ : Dict =json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A__ : Dict =tuple(state["""sep"""] ) if "cls" in state: A__ : Dict =tuple(state["""cls"""] ) A__ : Any =False if state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space: A__ : Optional[Any] =add_prefix_space A__ : Union[str, Any] =True if state.get("""trim_offsets""" , lowerCAmelCase_ ) != trim_offsets: A__ : Optional[Any] =trim_offsets A__ : str =True if changes_to_apply: A__ : Optional[int] =getattr(lowerCAmelCase_ , state.pop("""type""" ) ) A__ : List[str] 
=component_class(**lowerCAmelCase_ ) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) @property def lowercase__ ( self : int ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' A__ : Any =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value A__ : List[str] =value def lowercase__ ( self : Union[str, Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[Any] ) -> BatchEncoding: '''simple docstring''' A__ : Union[str, Any] =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowercase__ ( self : Any , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : str ) -> BatchEncoding: '''simple docstring''' A__ : Dict =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A__ : str =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]=None ) -> List[str]: '''simple docstring''' A__ : int =[self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : List[Any] =[self.sep_token_id] A__ : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
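# Round-trip sketch for the fast MVP tokenizer above, using the `RUCAIBox/mvp`
# checkpoint from its pretrained maps:
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
enc = tokenizer("Summarize: the weather was lovely.", return_tensors="pt")
print(tokenizer.decode(enc["input_ids"][0]))  # <s> ... </s> wrapping added above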
371
'''simple docstring'''
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
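# Hedged usage sketch for the processor above. "facebook/flava-full" is the public FLAVA
# checkpoint; the snippet assumes network access plus Pillow/requests, so it stays commented.
#
# from PIL import Image
# import requests
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# inputs = processor(text=["a photo of two cats"], images=image, return_tensors="np", padding=True)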
136
0
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : Dict = """new-model""" if is_tf_available(): class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = NewModelConfig @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" snake_case = 'bert-base-cased' snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" snake_case = 'bert-base-cased' snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase ) snake_case ,snake_case = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) 
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase ) snake_case ,snake_case = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ) snake_case ,snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) @slow @require_tensorflow_probability def snake_case ( self ): """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: snake_case = AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase ) snake_case ,snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained( lowerCAmelCase , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ) , 1_44_10 ) def snake_case ( self ): """simple docstring""" snake_case = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ) , 1_44_10 ) def snake_case ( self ): """simple docstring""" snake_case = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) 
        snake_case = copy.deepcopy(model.config )
        snake_case = ['FunnelBaseModel']
        snake_case = TFAutoModel.from_config(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(lowerCAmelCase )
            snake_case = TFAutoModel.from_pretrained(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

    def snake_case ( self ):
        """simple docstring"""
        try:
            AutoConfig.register('new-model' , lowerCAmelCase )

            snake_case = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(lowerCAmelCase ):
                        auto_class.register(lowerCAmelCase , lowerCAmelCase )
                    auto_class.register(lowerCAmelCase , lowerCAmelCase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(lowerCAmelCase ):
                        auto_class.register(lowerCAmelCase , lowerCAmelCase )

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    snake_case = BertModelTester(self ).get_config()
                    snake_case = NewModelConfig(**tiny_config.to_dict() )
                    snake_case = auto_class.from_config(lowerCAmelCase )
                    self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(lowerCAmelCase )
                        snake_case = auto_class.from_pretrained(lowerCAmelCase )
                        self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def snake_case ( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
            snake_case = TFAutoModel.from_pretrained('bert-base' )

    def snake_case ( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            snake_case = TFAutoModel.from_pretrained(lowerCAmelCase , revision='aaaaaa' )

    def snake_case ( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            lowerCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            snake_case = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )

    def snake_case ( self ):
        """simple docstring"""
        with self.assertRaisesRegex(lowerCAmelCase , 'Use `from_pt=True` to load this model' ):
            snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )

        # With a sharded checkpoint
        snake_case = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            snake_case = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
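# Hedged sketch of the AutoConfig/TFAutoModel registration pattern the test above
# exercises. `NewModelConfig` and `TFNewModel` stand in for the toy classes defined at
# the top of this test file; the snippet is illustrative, not a runnable fixture here.
#
# from transformers import AutoConfig, TFAutoModel
#
# AutoConfig.register("new-model", NewModelConfig)   # config type string -> config class
# TFAutoModel.register(NewModelConfig, TFNewModel)   # config class -> model class
# model = TFAutoModel.from_config(NewModelConfig())  # the auto-API now resolves the new model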
150
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } SCREAMING_SNAKE_CASE__ = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase__ ( ) -> str: """simple docstring""" snake_case = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) snake_case = bs[:] snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCamelCase ) cs.append(2**8 + n ) n += 1 snake_case = [chr(_UpperCamelCase ) for n in cs] return dict(zip(_UpperCamelCase , _UpperCamelCase ) ) def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case = set() snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case = char return pairs class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : str = VOCAB_FILES_NAMES _lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ): """simple docstring""" snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token super().__init__( errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , ) with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle: snake_case = json.load(lowerCAmelCase ) snake_case = {v: k for k, v in self.encoder.items()} snake_case = errors # how to handle errors in decoding snake_case = bytes_to_unicode() snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase , encoding='utf-8' ) as merges_handle: snake_case = merges_handle.read().split('\n' )[1:-1] snake_case = [tuple(merge.split() ) for merge in bpe_merges] snake_case = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) snake_case = {} snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case ( self ): """simple docstring""" return len(self.encoder ) def snake_case ( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" if token in self.cache: return self.cache[token] snake_case = tuple(lowerCAmelCase ) snake_case = get_pairs(lowerCAmelCase ) if not pairs: return token while True: snake_case = min(lowerCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break snake_case ,snake_case = bigram snake_case = [] snake_case = 0 while i < len(lowerCAmelCase ): try: snake_case = word.index(lowerCAmelCase , lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case = j if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case = tuple(lowerCAmelCase ) snake_case = new_word if len(lowerCAmelCase ) == 1: break else: snake_case = get_pairs(lowerCAmelCase ) snake_case = ' '.join(lowerCAmelCase ) snake_case = word return word def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = [] for token in re.findall(self.pat , lowerCAmelCase ): snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(' ' ) ) return bpe_tokens def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = ''.join(lowerCAmelCase ) snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): 
"""simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) snake_case = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' ) snake_case = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) snake_case = token_index writer.write(' '.join(lowerCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): """simple docstring""" snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ): """simple docstring""" snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()): snake_case = ' ' + text return (text, kwargs) def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): """simple docstring""" return token_ids_a + [self.eos_token_id] def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(lowerCAmelCase ) snake_case = ' '.join(lowerCAmelCase ) snake_case = self.encode(lowerCAmelCase ) if len(lowerCAmelCase ) > self.model_max_length: snake_case = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
150
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _UpperCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowercase ( lowerCamelCase__ , unittest.TestCase ): __lowercase : List[Any] = XLNetTokenizer __lowercase : Tuple = XLNetTokenizerFast __lowercase : int = True __lowercase : List[str] = True def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = '''<s>''' UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<eod>' ) self.assertEqual(len(__lowerCamelCase ) , 1_006 ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) UpperCamelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(__lowerCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [285, 46, 10, 170, 382] ) UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) UpperCamelCase = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) UpperCamelCase = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase ) UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase ) UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = XLNetTokenizer.from_pretrained('xlnet-base-cased' ) UpperCamelCase = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCamelCase ) UpperCamelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCamelCase ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = {'''input_ids''': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
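# The slow tests above rely on XLNet's single-sequence layout: content ids followed by
# `[sep_id, cls_id] == [4, 3]`. A lightweight local illustration (the helper name is ours
# for this sketch, not part of the tokenizer API):
def xlnet_single_sequence(token_ids, sep_id=4, cls_id=3):
    return token_ids + [sep_id, cls_id]

assert xlnet_single_sequence([17, 270]) == [17, 270, 4, 3]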
356
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """sorting left-half and right-half individually then merging them into result"""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


# iteration over the unsorted list
def iter_merge_sort(input_list: list) -> list:
    """Returns the sorted input_list using iterative bottom-up merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
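# Quick self-check for iter_merge_sort; runnable as-is, no user input required.
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert iter_merge_sort([]) == []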
110
0
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
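# rename_key is pure string manipulation, so it can be sanity-checked without a model:
# "layers.0" matches the \w+[.]\d+ pattern and is collapsed to "layers_0".
assert rename_key("encoder.layers.0.attention.weight") == "encoder.layers_0.attention.weight"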
299
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(F"""{solution() = }""")
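# add_three reduces a three-fraction sum to lowest terms; 1/2 + 1/3 + 1/6 collapses to 1/1.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)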
299
1
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowercase__ : Dict = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ : Dict = test_results.split(' ' ) lowerCAmelCase_ : List[Any] = 0 lowerCAmelCase_ : List[str] = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. lowerCAmelCase_ : List[str] = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCAmelCase__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> List[str]: """simple docstring""" lowerCAmelCase_ : int = {} lowerCAmelCase_ : Optional[Any] = None lowerCAmelCase_ : Any = False for line in failures_short_lines.split('\n' ): if re.search(R'_ \[doctest\]' , lowerCAmelCase__ ): lowerCAmelCase_ : Any = True lowerCAmelCase_ : Optional[int] = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): lowerCAmelCase_ : Dict = line lowerCAmelCase_ : str = False return failures class UpperCamelCase__ : """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ): lowerCAmelCase_ : str = title lowerCAmelCase_ : str = doc_test_results['time_spent'].split(',' )[0] lowerCAmelCase_ : List[str] = doc_test_results['success'] lowerCAmelCase_ : Optional[int] = doc_test_results['failures'] lowerCAmelCase_ : Any = self.n_success + self.n_failures # Failures and success of the modeling tests lowerCAmelCase_ : List[Any] = doc_test_results @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : Dict = [self._time_spent] lowerCAmelCase_ : Any = 0 for time in time_spent: lowerCAmelCase_ : Any = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(__UpperCAmelCase ) == 1: lowerCAmelCase_ : int = [0, 0, time_parts[0]] lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0 return F"{int(__UpperCAmelCase )}h{int(__UpperCAmelCase )}m{int(__UpperCAmelCase )}s" @property def SCREAMING_SNAKE_CASE__ ( self : str ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : str = 4_0 lowerCAmelCase_ : List[str] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )} lowerCAmelCase_ : Tuple = '' for category, failures in category_failures.items(): if len(__UpperCAmelCase ) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(__UpperCAmelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowerCAmelCase_ : Union[str, Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(__UpperCAmelCase ) @staticmethod def SCREAMING_SNAKE_CASE__ ( ): lowerCAmelCase_ : Tuple = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(__UpperCAmelCase )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=__UpperCAmelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) lowerCAmelCase_ : Any = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.' lowerCAmelCase_ : Dict = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=__UpperCAmelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): lowerCAmelCase_ : Optional[int] = '' for key, value in failures.items(): lowerCAmelCase_ : str = value[:2_0_0] + ' [Truncated]' if len(__UpperCAmelCase ) > 2_5_0 else value failures_text += F"*{key}*\n_{value}_\n\n" lowerCAmelCase_ : str = job_name lowerCAmelCase_ : Union[str, Any] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: lowerCAmelCase_ : Tuple = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def SCREAMING_SNAKE_CASE__ ( self : int ): if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' 
) lowerCAmelCase_ : str = self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) lowerCAmelCase_ : Dict = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE_ : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): lowerCAmelCase_ : List[str] = F"*Num failures* :{len(job_result['failed'] )} \n" lowerCAmelCase_ : List[str] = job_result['failures'] lowerCAmelCase_ : Any = self.get_reply_blocks(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , text=__UpperCAmelCase ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F"Results for {job}" , blocks=__UpperCAmelCase , thread_ts=self.thread_ts['ts'] , ) time.sleep(1 ) def UpperCamelCase_ ( ) -> Tuple: """simple docstring""" lowerCAmelCase_ : Optional[int] = os.environ['GITHUB_RUN_ID'] lowerCAmelCase_ : int = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" lowerCAmelCase_ : List[Any] = requests.get(lowerCAmelCase__ ).json() lowerCAmelCase_ : Optional[int] = {} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) lowerCAmelCase_ : str = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowerCAmelCase__ ): lowerCAmelCase_ : List[Any] = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.' , lowerCAmelCase__ ) return {} def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> Dict: """simple docstring""" lowerCAmelCase_ : Any = {} if os.path.exists(lowerCAmelCase__ ): lowerCAmelCase_ : int = os.listdir(lowerCAmelCase__ ) for file in files: try: with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , encoding='utf-8' ) as f: lowerCAmelCase_ : Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}." 
) from e return _artifact def UpperCamelCase_ ( ) -> int: """simple docstring""" class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : int = name lowerCAmelCase_ : List[str] = [] def __str__( self : int ): return self.name def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str ): self.paths.append({'name': self.name, 'path': path} ) lowerCAmelCase_ : int = {} lowerCAmelCase_ : str = filter(os.path.isdir , os.listdir() ) for directory in directories: lowerCAmelCase_ : Dict = directory if artifact_name not in _available_artifacts: lowerCAmelCase_ : Optional[Any] = Artifact(lowerCAmelCase__ ) _available_artifacts[artifact_name].add_path(lowerCAmelCase__ ) return _available_artifacts if __name__ == "__main__": lowercase__ : List[Any] = get_job_links() lowercase__ : List[Any] = retrieve_available_artifacts() lowercase__ : Dict = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowercase__ : Tuple = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowercase__ : Dict = github_actions_job_links.get("""run_doctests""") lowercase__ : Tuple = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowercase__ : int = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowercase__ , lowercase__ , lowercase__ : Optional[int] = handle_test_results(artifact["""stats"""]) lowercase__ : Tuple = failed lowercase__ : Dict = success lowercase__ : Any = time_spent[1:-1] + """, """ lowercase__ : Tuple = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): lowercase__ : Dict = line.replace("""FAILED """, """""") lowercase__ : int = line.split()[0].replace("""\n""", """""") if "::" in line: lowercase__ , lowercase__ : Union[str, Any] = line.split("""::""") else: lowercase__ , lowercase__ : Union[str, Any] = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowercase__ : Any = docs[file_regex] doc_test_results[category]["failed"].append(test) lowercase__ : List[str] = all_failures[test] if test in all_failures else """N/A""" lowercase__ : Any = failure break lowercase__ : int = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
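# Hedged sketch of the stats-parsing idea used above: pytest summary lines such as
# "1 failed, 2 passed in 30.00s" are split on spaces and the numbers preceding the
# "failed"/"passed" markers are accumulated. `parse_test_results` is a local name for
# illustration only, not part of the file above.
def parse_test_results(test_results: str):
    expressions = test_results.split(" ")
    failed = success = 0
    # When the output is short enough it is surrounded by "=" signs, shifting the time field.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent

assert parse_test_results("1 failed, 2 passed in 30.00s") == (1, 2, "30.00s")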
354
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowercase__ : Tuple = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""pixel_values"""] def __init__( self : Any , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : int , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Dict = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} lowerCAmelCase_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Tuple = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} lowerCAmelCase_ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='crop_size' ) lowerCAmelCase_ : List[Any] = do_resize lowerCAmelCase_ : Any = do_rescale lowerCAmelCase_ : int = do_normalize lowerCAmelCase_ : List[str] = do_center_crop lowerCAmelCase_ : Dict = crop_size lowerCAmelCase_ : Optional[Any] = size lowerCAmelCase_ : Tuple = resample lowerCAmelCase_ : Optional[int] = rescale_factor lowerCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCAmelCase_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : str , ): lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "shortest_edge" in size: lowerCAmelCase_ : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: lowerCAmelCase_ : Union[str, Any] = (size['height'], size['width']) else: raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. 
Got {size.keys()}" ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : int , ): lowerCAmelCase_ : Any = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] ): return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : str , ): return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ): lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ : Dict = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std lowerCAmelCase_ : Tuple = size if size is not None else self.size lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ ) if not is_batched(SCREAMING_SNAKE_CASE_ ): lowerCAmelCase_ : List[Any] = [images] if not 
valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. lowerCAmelCase_ : str = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: lowerCAmelCase_ : Any = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: lowerCAmelCase_ : Optional[int] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: lowerCAmelCase_ : Tuple = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: lowerCAmelCase_ : str = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] lowerCAmelCase_ : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] lowerCAmelCase_ : Optional[Any] = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
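# A minimal usage sketch for the processor class above, assuming its mangled method
# names are restored to the usual HF image-processor shape (calling the instance
# dispatches to a `preprocess`-style entry point); names here are illustrative only:
#   import numpy as np
#   processor = UpperCamelCase__(size={"height": 224, "width": 224})
#   image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)   # expected (1, 3, 224, 224)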
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    # Resolve the bundled test script relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
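# Typical invocations of the command this module backs, once accelerate is installed:
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml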
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase = 13 , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 128 , UpperCamelCase=[16, 32, 64, 128] , UpperCamelCase = 7 , UpperCamelCase = 4 , UpperCamelCase = 37 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 10 , UpperCamelCase = 0.02 , UpperCamelCase = 2 , UpperCamelCase = 1 , UpperCamelCase = 128 , UpperCamelCase = [2, 2, 2, 2] , UpperCamelCase = 2 , UpperCamelCase = 2 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = encoder_stride lowerCamelCase_ = num_attention_outputs lowerCamelCase_ = embed_dim lowerCamelCase_ = embed_dim + 1 lowerCamelCase_ = resolution lowerCamelCase_ = depths lowerCamelCase_ = hidden_sizes lowerCamelCase_ = dim lowerCamelCase_ = mlp_expansion_ratio def snake_case ( self ): """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def snake_case ( self ): """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" 
lowerCamelCase_ = TFEfficientFormerModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase ) lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFEfficientFormerModel, "image-classification": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerModelTester(self ) lowerCamelCase_ = ConfigTester( self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def snake_case ( self ): """simple docstring""" def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) if hasattr(self.model_tester , 
"encoder_seq_length" ): lowerCamelCase_ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: lowerCamelCase_ = seq_length * self.model_tester.chunk_length else: lowerCamelCase_ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: lowerCamelCase_ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCamelCase , (list, tuple) ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ): """simple docstring""" lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFEfficientFormerModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "chunk_length" , UpperCamelCase ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): lowerCamelCase_ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def snake_case ( self ): """simple docstring""" # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowerCamelCase_ = model_class(UpperCamelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowerCamelCase_ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowerCamelCase_ = model(UpperCamelCase ) self.assertTrue(outputs_dict is not None ) def __snake_case ( ): lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self ): """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
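# These tests are normally driven by pytest; a typical selective run looks like the
# following (the path is illustrative and depends on the repository layout):
#   python -m pytest tests/models/efficientformer -k "attention" -v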
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): __lowerCamelCase : int = True from torch.cuda.amp import autocast __lowerCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "Whether to log verbose messages or not."} , ) lowercase__ = field( default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} ) lowercase__ = field( default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} ) lowercase__ = field( default=0.99_99_95 , metadata={"help": "Decay of gumbel temperature during training."} ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str: '''simple docstring''' logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase_ = logging.WARNING if model_args.verbose_logging: lowercase_ = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): lowercase_ = logging.INFO logger.setLevel(__lowerCAmelCase ) @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) lowercase__ = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) lowercase__ = field( default="validation" , metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) lowercase__ = field( default="file" , metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"} , ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) lowercase__ = field( default=1 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) lowercase__ = field( default=__UpperCAmelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) lowercase__ = field( default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = 42 lowercase__ = 42 lowercase__ = "longest" lowercase__ = None lowercase__ = None def __call__( self : Optional[int] , lowerCAmelCase_ : List[Dict[str, Union[List[int], torch.Tensor]]]): """simple docstring""" lowercase_ = self.feature_extractor.pad( lowerCAmelCase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) lowercase_ = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1]) lowercase_ = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula lowercase_ = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1)).to( torch.long) lowercase_ = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device) # these two operations makes sure that all values # before the output lengths indices are attended to lowercase_ = 1 lowercase_ = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() # sample randomly masked indices lowercase_ = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase_ , min_masks=2 , ) return batch class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): def __init__( self : int , *lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : str=1.0 , **lowerCAmelCase_ : Optional[Any]): """simple docstring""" super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_) lowercase_ = 0 lowercase_ = max_gumbel_temp lowercase_ = min_gumbel_temp lowercase_ = gumbel_temp_decay def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]]): """simple docstring""" model.train() lowercase_ = self._prepare_inputs(lowerCAmelCase_) if self.use_amp: with autocast(): lowercase_ = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_) else: lowercase_ = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": lowercase_ = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowercase_ = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''') if self.args.gradient_accumulation_steps > 1: lowercase_ = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCAmelCase_).backward() elif self.use_apex: with amp.scale_loss(lowerCAmelCase_ , self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCAmelCase_) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp)) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp)) return loss.detach() def _SCREAMING_SNAKE_CASE () -> List[str]: '''simple docstring''' lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses() configure_logger(__lowerCAmelCase , __lowerCAmelCase ) # Downloading and loading a dataset from the hub. lowercase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" lowercase_ = DatasetDict() lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" lowercase_ = DatasetDict() lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported lowercase_ = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__lowerCAmelCase ) def prepare_dataset(__lowerCAmelCase ): # check that all files have the correct sampling rate lowercase_ , lowercase_ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays lowercase_ = datasets.map( __lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long lowercase_ = vectorized_datasets.filter( lambda __lowerCAmelCase : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(__lowerCAmelCase ): return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` lowercase_ = vectorized_datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob 
has to be 0.0 lowercase_ = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) lowercase_ = WavaVecaForPreTraining(__lowerCAmelCase ) lowercase_ = DataCollatorForWavaVecaPretraining(model=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) lowercase_ = WavaVecaPreTrainer( model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=__lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
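# A sketch of a typical launch for this pretraining script; the dataset/split flags
# mirror the DataTrainingArguments above, and the checkpoint name is illustrative:
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train --fp16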
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ): lowercase__ = BarthezTokenizer lowercase__ = BarthezTokenizerFast lowercase__ = True lowercase__ = True def _UpperCAmelCase ( self : List[Any]): """simple docstring""" super().setUp() lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""") tokenizer.save_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_) lowercase_ = tokenizer def _UpperCAmelCase ( self : Any): """simple docstring""" lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2) @require_torch def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] lowercase_ = self.tokenizer( lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""") self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_) self.assertEqual((2, 6) , batch.input_ids.shape) self.assertEqual((2, 6) , batch.attention_mask.shape) lowercase_ = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = tokenizer.tokenize(lowerCAmelCase_) lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(lowerCAmelCase_) lowercase_ = rust_tokenizer.encode(lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) @slow def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 
8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowercase_ = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
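# The round trip the tests above exercise, in isolation (requires downloading the
# moussaKam/mbarthez checkpoint, hence left commented out here):
#   from transformers import BarthezTokenizerFast
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
#   ids = tok("A long paragraph for summarization.").input_ids
#   assert tok.decode(ids, skip_special_tokens=True).startswith("A long paragraph")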
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` adds up to `required_sum` (bottom-up DP)."""
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element,
    # hence True
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and the set is empty, then False
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # element too large to use: inherit the answer without it
                subset[i][j] = subset[i - 1][j]
            else:
                # either skip the element, or take it and reduce the target
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
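# A quick sanity check for the DP above; a small instance, safe to run directly.
if __name__ == "__main__":
    assert is_subset_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5 == 9
    assert not is_subset_sum([3, 34, 4, 12, 5, 2], 30)  # no subset reaches 30
    print("subset-sum sanity checks passed")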
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") UpperCamelCase : int = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCamelCase : Optional[Any] = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCamelCase : str = sorted(arg_to_scheduler.keys()) UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: __UpperCamelCase = config __UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in 
self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = model def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: __UpperCamelCase = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.validation_end(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) __UpperCamelCase = len(self.train_dataloader().dataset ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def UpperCAmelCase ( self ): '''simple docstring''' return self.train_loader def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('best_tfmr' ) __UpperCamelCase = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def UpperCAmelCase ( 
__UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' parser.add_argument( '--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase ) parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--adafactor' , action='store_true' ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['scheduler'] __UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Validation results *****' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Test results *****' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(__UpperCAmelCase , 'w' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def A ( snake_case :Any , snake_case :int ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=snake_case , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]: pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 1_6 if args.gpus > 1: __UpperCamelCase = 'auto' __UpperCamelCase = 'ddp' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = 'auto' __UpperCamelCase = pl.Trainer.from_argparse_args( snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , ) if args.do_train: trainer.fit(snake_case ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
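# How the pieces above are typically wired together. Names follow the un-mangled
# lightning_base helpers this file appears to mirror (add_generic_args for the
# argparse setup, generic_train for the trainer), and MyTaskModule stands in for a
# user-defined BaseTransformer subclass; all of these are assumptions, not verbatim:
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskModule(args)
#   trainer = generic_train(model, args)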
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowerCamelCase__ ( snake_case_ : int ) -> Any: __snake_case = botoa.client('''iam''' ) __snake_case = { '''Version''': '''2012-10-17''', '''Statement''': [ {'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=snake_case_ , AssumeRolePolicyDocument=json.dumps(snake_case_ , indent=2 ) ) __snake_case = { '''Version''': '''2012-10-17''', '''Statement''': [ { '''Effect''': '''Allow''', '''Action''': [ '''sagemaker:*''', '''ecr:GetDownloadUrlForLayer''', '''ecr:BatchGetImage''', '''ecr:BatchCheckLayerAvailability''', '''ecr:GetAuthorizationToken''', '''cloudwatch:PutMetricData''', '''cloudwatch:GetMetricData''', '''cloudwatch:GetMetricStatistics''', '''cloudwatch:ListMetrics''', '''logs:CreateLogGroup''', '''logs:CreateLogStream''', '''logs:DescribeLogStreams''', '''logs:PutLogEvents''', '''logs:GetLogEvents''', '''s3:CreateBucket''', '''s3:ListBucket''', '''s3:GetBucketLocation''', '''s3:GetObject''', '''s3:PutObject''', ], '''Resource''': '''*''', } ], } # attach policy to role iam_client.put_role_policy( RoleName=snake_case_ , PolicyName=f"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(snake_case_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"""role {role_name} already exists. 
Using existing one""" ) def lowerCamelCase__ ( snake_case_ : Tuple ) -> Any: __snake_case = botoa.client('''iam''' ) return iam_client.get_role(RoleName=snake_case_ )["Role"]["Arn"] def lowerCamelCase__ ( ) -> Optional[int]: __snake_case = _ask_options( '''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , snake_case_ , ) __snake_case = None if credentials_configuration == 0: __snake_case = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' ) __snake_case = aws_profile else: print( '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,''' '''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' ) __snake_case = _ask_field('''AWS Access Key ID: ''' ) __snake_case = aws_access_key_id __snake_case = _ask_field('''AWS Secret Access Key: ''' ) __snake_case = aws_secret_access_key __snake_case = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' ) __snake_case = aws_region __snake_case = _ask_options( '''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , snake_case_ , ) if role_management == 0: __snake_case = _ask_field('''Enter your IAM role name: ''' ) else: __snake_case = '''accelerate_sagemaker_execution_role''' print(f"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" ) _create_iam_role_for_sagemaker(snake_case_ ) __snake_case = _ask_field( '''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) __snake_case = None if is_custom_docker_image: __snake_case = _ask_field('''Enter your Docker image: ''' , lambda snake_case_ : str(snake_case_ ).lower() ) __snake_case = _ask_field( '''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) __snake_case = None if is_sagemaker_inputs_enabled: __snake_case = _ask_field( '''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda snake_case_ : str(snake_case_ ).lower() , ) __snake_case = _ask_field( '''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) __snake_case = None if is_sagemaker_metrics_enabled: __snake_case = _ask_field( '''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda snake_case_ : str(snake_case_ ).lower() , ) __snake_case = _ask_options( '''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , ) __snake_case = {} __snake_case = _ask_field( '''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) if use_dynamo: __snake_case = '''dynamo_''' __snake_case = _ask_options( '''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) __snake_case = _ask_field( '''Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) if use_custom_options: __snake_case = _ask_options( '''Which mode do you want to use?''' , snake_case_ , lambda snake_case_ : TORCH_DYNAMO_MODES[int(snake_case_ )] , default='''default''' , ) __snake_case = _ask_field( '''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) __snake_case = _ask_field( '''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , ) __snake_case = '''Which EC2 instance type you want to use for your training?''' if distributed_type != SageMakerDistributedType.NO: __snake_case = _ask_options( snake_case_ , snake_case_ , lambda snake_case_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(snake_case_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __snake_case = _ask_field(snake_case_ , lambda snake_case_ : str(snake_case_ ).lower() , default='''ml.p3.2xlarge''' ) __snake_case = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __snake_case = _ask_field( '''How many machines do you want use? [1]: ''' , snake_case_ , default=1 , ) __snake_case = _ask_options( '''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( '''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' ) return SageMakerConfig( image_uri=snake_case_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=snake_case_ , use_cpu=snake_case_ , dynamo_config=snake_case_ , eca_instance_type=snake_case_ , profile=snake_case_ , region=snake_case_ , iam_role_name=snake_case_ , mixed_precision=snake_case_ , num_machines=snake_case_ , sagemaker_inputs_file=snake_case_ , sagemaker_metrics_file=snake_case_ , )
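# Context: this questionnaire backs `accelerate config` when the chosen compute
# environment is Amazon SageMaker; the answers are folded into the SageMakerConfig
# returned at the end and written to accelerate's default config file.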
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags carry the links we want.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor a bare fragment, keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already collected, resolve it against the domain.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registered domain, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (sub-domain included)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open the URL and pass the raw HTML to the parser to get links
        r = requests.get(url)
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            try:
                read = requests.get(link)
                # Get the valid emails.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in the set then add it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
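# The matching step in isolation (no network needed; "example.com" stands in for a
# crawled domain):
#
#     >>> re.findall("[a-zA-Z0-9]+@" + "example.com", "write to alice1@example.com")
#     ['alice1@example.com']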
238
0
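As an aside on the crawler in the row above: the whole mechanism rests on HTMLParser's handle_starttag hook. A minimal, self-contained sketch of that hook in isolation (the LinkParser name and example.com URL are illustrative assumptions, not from the row itself):

from html.parser import HTMLParser
from urllib import parse


class LinkParser(HTMLParser):
    """Collect absolute hrefs from anchor tags (stripped-down version of the Parser above)."""

    def __init__(self, base_url: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.base_url = base_url

    def handle_starttag(self, tag, attrs):
        if tag == "a":
            for name, value in attrs:
                if name == "href" and value not in (None, "", "#"):
                    self.urls.append(parse.urljoin(self.base_url, value))


parser = LinkParser("https://example.com")
parser.feed('<a href="/about">About</a> <a href="#">skip</a>')
print(parser.urls)  # ['https://example.com/about']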
_snake_case = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
157
import argparse import os import re _snake_case = '''src/transformers/models/auto''' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict _snake_case = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''') # re pattern that matches identifiers in mappings _snake_case = re.compile(r'''\s*\(\s*"(\S[^"]+)"''') def _UpperCamelCase ( snake_case__, snake_case__ = False ) -> List[Any]: with open(snake_case__, "r", encoding="utf-8" ) as f: __UpperCAmelCase : Dict = f.read() __UpperCAmelCase : Optional[Any] = content.split("\n" ) __UpperCAmelCase : int = [] __UpperCAmelCase : Optional[int] = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: __UpperCAmelCase : str = len(re.search(r"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 __UpperCAmelCase : Dict = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": __UpperCAmelCase : str = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers __UpperCAmelCase : Dict = sorted(snake_case__, key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__, "w", encoding="utf-8" ) as f: f.write("\n".join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def _UpperCamelCase ( snake_case__ = False ) -> Any: __UpperCAmelCase : str = [os.path.join(snake_case__, snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith(".py" )] __UpperCAmelCase : Optional[Any] = [sort_auto_mapping(snake_case__, overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): __UpperCAmelCase : List[Any] = [f for f, d in zip(snake_case__, snake_case__ ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') _snake_case = parser.parse_args() sort_all_auto_mappings(not args.check_only)
157
1
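A quick illustration of what the two regexes in the mapping-sorting script above actually match; the sample strings below are made up for the demo:

import re

_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

# Both the *_MAPPING_NAMES and bare *_MAPPING forms are recognized.
print(bool(_re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(")))  # True
print(bool(_re_intro_mapping.search("MODEL_MAPPING = OrderedDict(")))        # True
# The identifier regex extracts the sort key from each mapping entry.
print(_re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0])  # albert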
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants a__ = Mapping[str, np.ndarray] a__ = Mapping[str, Any] # Is a nested dict. a__ = 0.01 @dataclasses.dataclass(frozen=__lowercase ) class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files UpperCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) UpperCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent UpperCAmelCase__ : Optional[Sequence[int]] = None def __UpperCAmelCase ( __a : str ) -> Protein: """simple docstring""" _a : int = R'''(\[[A-Z]+\]\n)''' _a : List[str] = [tag.strip() for tag in re.split(__a ,__a ) if len(__a ) > 0] _a : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] ,[l.split('''\n''' ) for l in tags[1::2]] ) _a : List[str] = ["N", "CA", "C"] _a : Tuple = None _a : int = None _a : int = None for g in groups: if "[PRIMARY]" == g[0]: _a : Any = g[1][0].strip() for i in range(len(__a ) ): if seq[i] not in residue_constants.restypes: _a : Any = '''X''' # FIXME: strings are immutable _a : Dict = np.array( [residue_constants.restype_order.get(__a ,residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: _a : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(__a ,g[1][axis].split() ) ) ) _a : Union[str, Any] = np.array(__a ) _a : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__a ): _a : Optional[int] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: _a : Any = np.array(list(map({'''-''': 0, '''+''': 1}.get ,g[1][0].strip() ) ) ) _a : Tuple = np.zeros( ( len(__a ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__a ): _a : Union[str, Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__a ,atom_mask=__a ,aatype=__a ,residue_index=np.arange(len(__a ) ) ,b_factors=__a ,) def __UpperCAmelCase ( __a : Protein ,__a : int = 0 ) -> List[str]: """simple docstring""" _a : List[str] = [] _a : str = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) _a : Optional[int] = prot.parents _a : Tuple = prot.parents_chain_index if parents is not None and parents_chain_index is not None: _a : int = [p for i, p in zip(__a ,__a ) if i == chain_id] if parents is None or len(__a ) == 0: _a : List[Any] = ['''N/A'''] pdb_headers.append(F"""PARENT {' '.join(__a )}""" ) return pdb_headers def __UpperCAmelCase 
( __a : Protein ,__a : str ) -> str: """simple docstring""" _a : List[str] = [] _a : Union[str, Any] = pdb_str.split('''\n''' ) _a : Any = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) _a : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: _a : Tuple = [] if prot.parents_chain_index is not None: _a : Dict[str, List[str]] = {} for p, i in zip(prot.parents ,prot.parents_chain_index ): parent_dict.setdefault(str(__a ) ,[] ) parent_dict[str(__a )].append(__a ) _a : List[Any] = max([int(__a ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): _a : int = parent_dict.get(str(__a ) ,['''N/A'''] ) parents_per_chain.append(__a ) else: parents_per_chain.append(list(prot.parents ) ) else: _a : Tuple = [['''N/A''']] def make_parent_line(__a : Sequence[str] ) -> str: return F"""PARENT {' '.join(__a )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) _a : List[Any] = 0 for i, l in enumerate(__a ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__a ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__a ): _a : str = parents_per_chain[chain_counter] else: _a : Optional[int] = ['''N/A'''] out_pdb_lines.append(make_parent_line(__a ) ) return "\n".join(__a ) def __UpperCAmelCase ( __a : Protein ) -> str: """simple docstring""" _a : str = residue_constants.restypes + ['''X'''] def res_atoa(__a : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] ,'''UNK''' ) _a : Dict = residue_constants.atom_types _a : List[str] = [] _a : List[Any] = prot.atom_mask _a : Union[str, Any] = prot.aatype _a : Optional[Any] = prot.atom_positions _a : Dict = prot.residue_index.astype(np.intaa ) _a : Tuple = prot.b_factors _a : str = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) _a : List[Any] = get_pdb_headers(__a ) if len(__a ) > 0: pdb_lines.extend(__a ) _a : List[str] = aatype.shape[0] _a : Optional[Any] = 1 _a : List[str] = 0 _a : Dict = string.ascii_uppercase _a : Optional[Any] = None # Add all atom sites. for i in range(__a ): _a : Any = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__a ,atom_positions[i] ,atom_mask[i] ,b_factors[i] ): if mask < 0.5: continue _a : Optional[Any] = '''ATOM''' _a : List[Any] = atom_name if len(__a ) == 4 else F""" {atom_name}""" _a : Union[str, Any] = '''''' _a : Dict = '''''' _a : List[Any] = 1.00 _a : int = atom_name[0] # Protein supports only C, N, O, S, this works. _a : int = '''''' _a : Union[str, Any] = '''A''' if chain_index is not None: _a : int = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! _a : Tuple = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(__a ) atom_index += 1 _a : List[Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: _a : Optional[int] = True _a : Union[str, Any] = chain_index[i + 1] if should_terminate: # Close the chain. _a : str = '''TER''' _a : Any = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(__a ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. 
pdb_lines.extend(get_pdb_headers(__a ,__a ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(__a ) def __UpperCAmelCase ( __a : Protein ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def __UpperCAmelCase ( __a : FeatureDict ,__a : ModelOutput ,__a : Optional[np.ndarray] = None ,__a : Optional[np.ndarray] = None ,__a : Optional[str] = None ,__a : Optional[Sequence[str]] = None ,__a : Optional[Sequence[int]] = None ,) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] ,atom_positions=result['''final_atom_positions'''] ,atom_mask=result['''final_atom_mask'''] ,residue_index=features['''residue_index'''] + 1 ,b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) ,chain_index=__a ,remark=__a ,parents=__a ,parents_chain_index=__a ,)
15
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
15
1
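The columnar f-string in the PDB writer above is easy to get wrong, since every space is significant in the PDB format. A standalone sketch of the same layout with dummy values (all values are illustrative, not from a real structure; the atom name is pre-padded to four characters as the writer does):

record_type, atom_index, name, alt_loc = "ATOM", 1, " CA ", ""
res_name, chain_tag, residue_index, insertion_code = "GLY", "A", 1, ""
x, y, z, occupancy, b_factor, element, charge = 1.0, 2.0, 3.0, 1.00, 0.00, "C", ""

atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{residue_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
print(repr(atom_line))  # fixed-width columns, exactly as a PDB reader expects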
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : str = (EulerDiscreteScheduler,) __UpperCAmelCase : Optional[Any] = 1_0 def _lowercase ( self : List[str], **UpperCAmelCase__ : Optional[int] ): __lowercase = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**UpperCAmelCase__ ) return config def _lowercase ( self : str ): for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def _lowercase ( self : Dict ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase__, beta_end=UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase__ ) def _lowercase ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__ ) def _lowercase ( self : Dict ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): __lowercase = scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ) __lowercase = output.prev_sample __lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) ) __lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def _lowercase ( self : List[Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(prediction_type="v_prediction" ) __lowercase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): __lowercase = scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ) __lowercase = output.prev_sample __lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) ) __lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3 def _lowercase ( self : str ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps, device=UpperCAmelCase__ ) __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __lowercase = sample.to(UpperCAmelCase__ ) for t in scheduler.timesteps: __lowercase = 
scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ) __lowercase = output.prev_sample __lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) ) __lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def _lowercase ( self : str ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**UpperCAmelCase__, use_karras_sigmas=UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps, device=UpperCAmelCase__ ) __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __lowercase = sample.to(UpperCAmelCase__ ) for t in scheduler.timesteps: __lowercase = scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ) __lowercase = output.prev_sample __lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) ) __lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
17
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
270
0
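For readers unfamiliar with the scheduler API exercised by the test above, here is a minimal denoising-loop sketch. The zero tensor stands in for a real UNet prediction and the shapes are arbitrary; the constructor arguments mirror the test's config:

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.abs().mean())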
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Any = ["""vqvae"""] def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ , mel=snake_case__ , vqvae=snake_case__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return 50 if isinstance(self.scheduler , snake_case__ ) else 1000 @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' UpperCAmelCase : str =steps or self.get_default_steps() self.scheduler.set_timesteps(snake_case__ ) UpperCAmelCase : int =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase : List[Any] =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase : int =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=snake_case__ , device=self.device , ) UpperCAmelCase : int =noise UpperCAmelCase : Optional[int] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(snake_case__ , snake_case__ ) UpperCAmelCase : Tuple =self.mel.audio_slice_to_image(snake_case__ ) UpperCAmelCase : Any =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase : Optional[int] =(input_image / 255) * 2 - 1 UpperCAmelCase : int =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase : Optional[Any] =self.vqvae.encode(torch.unsqueeze(snake_case__ , 0 ) ).latent_dist.sample( generator=snake_case__ )[0] UpperCAmelCase : int =self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase : Union[str, Any] =self.scheduler.add_noise(snake_case__ , snake_case__ , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase : List[str] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase : Tuple =int(mask_start_secs * pixels_per_second ) UpperCAmelCase : List[str] =int(mask_end_secs * pixels_per_second ) UpperCAmelCase : Tuple =self.scheduler.add_noise(snake_case__ , snake_case__ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , snake_case__ ): UpperCAmelCase : Dict =self.unet(snake_case__ , snake_case__ , snake_case__ )['''sample'''] else: UpperCAmelCase : str =self.unet(snake_case__ , snake_case__ )['''sample'''] if isinstance(self.scheduler , snake_case__ ): UpperCAmelCase : Optional[int] =self.scheduler.step( 
model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , eta=snake_case__ , generator=snake_case__ , )['''prev_sample'''] else: UpperCAmelCase : str =self.scheduler.step( model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase : int =mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase : Tuple =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase : List[str] =1 / self.vqvae.config.scaling_factor * images UpperCAmelCase : Any =self.vqvae.decode(snake_case__ )['''sample'''] UpperCAmelCase : Optional[Any] =(images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Optional[int] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase : Dict =(images * 255).round().astype('''uint8''' ) UpperCAmelCase : Optional[Any] =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(snake_case__ , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase : List[Any] =[self.mel.image_to_audio(snake_case__ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(snake_case__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(snake_case__ ) ) @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = 50 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , snake_case__ ) self.scheduler.set_timesteps(snake_case__ ) UpperCAmelCase : List[Any] =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase : Dict =(sample / 255) * 2 - 1 UpperCAmelCase : List[str] =torch.Tensor(snake_case__ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase : str =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase : int =self.scheduler.alphas_cumprod[t] UpperCAmelCase : List[Any] =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase : List[str] =1 - alpha_prod_t UpperCAmelCase : List[Any] =self.unet(snake_case__ , snake_case__ )['''sample'''] UpperCAmelCase : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase : Union[str, Any] =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase : List[str] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCAmelCase__ ( snake_case__ , snake_case__ , snake_case__ ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase : Dict =acos(torch.dot(torch.flatten(snake_case__ ) , torch.flatten(snake_case__ ) ) / torch.norm(snake_case__ ) / torch.norm(snake_case__ ) ) return sin((1 - alpha) * theta ) * xa / sin(snake_case__ ) + sin(alpha * theta ) * xa / sin(snake_case__ )
78
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Dict = StableUnCLIPPipeline __lowerCamelCase : int = TEXT_TO_IMAGE_PARAMS __lowerCamelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS __lowerCamelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __lowerCamelCase : Optional[Any] = False def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : int =32 UpperCAmelCase : Union[str, Any] =embedder_hidden_size # prior components torch.manual_seed(0 ) UpperCAmelCase : Optional[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase : int =CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case__ , projection_dim=snake_case__ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase : Dict =PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case__ , num_layers=1 , ) torch.manual_seed(0 ) UpperCAmelCase : Tuple =DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=snake_case__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=snake_case__ ) UpperCAmelCase : Any =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) UpperCAmelCase : List[str] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase : List[str] =CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] =UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case__ , layers_per_block=1 , upcast_attention=snake_case__ , use_linear_projection=snake_case__ , ) torch.manual_seed(0 ) UpperCAmelCase : List[Any] =DDIMScheduler( beta_schedule='''scaled_linear''' , 
beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=snake_case__ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase : Dict =AutoencoderKL() UpperCAmelCase : Tuple ={ # prior components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def UpperCAmelCase__ ( self , snake_case__ , snake_case__=0 ) -> List[Any]: '''simple docstring''' if str(snake_case__ ).startswith('''mps''' ): UpperCAmelCase : Union[str, Any] =torch.manual_seed(snake_case__ ) else: UpperCAmelCase : Any =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase : str ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Tuple =torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=snake_case__ ) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) UpperCAmelCase : Optional[int] =StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase : int =pipe('''anime turle''' , generator=snake_case__ , output_type='''np''' ) UpperCAmelCase : str =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase : List[str] =StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) UpperCAmelCase : str =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase : Any =pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase : Tuple =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
78
1
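The static slerp helper at the end of the audio-diffusion pipeline above is worth restating outside torch. A NumPy re-derivation, illustrative only:

import numpy as np


def slerp(alpha: float, x0: np.ndarray, x1: np.ndarray) -> np.ndarray:
    """Spherical linear interpolation between two arrays, treated as flat vectors."""
    theta = np.arccos(
        np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)


a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
print(slerp(0.5, a, b))  # ~[0.7071, 0.7071]: the midpoint stays on the unit circle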
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A_ = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
64
from manim import * class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Union[str, Any] = Rectangle(height=0.5, width=0.5 ) A : Optional[int] = Rectangle(height=0.25, width=0.25 ) A : Optional[Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) A : List[str] = [mem.copy() for i in range(6 )] A : Any = [mem.copy() for i in range(6 )] A : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : str = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : List[Any] = Text("""CPU""", font_size=24 ) A : Optional[int] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) A : List[Any] = [mem.copy() for i in range(4 )] A : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Dict = Text("""GPU""", font_size=24 ) A : Any = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase__ ) A : Optional[int] = [mem.copy() for i in range(6 )] A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Optional[int] = Text("""Model""", font_size=24 ) A : List[Any] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase__ ) A : Tuple = [] A : Tuple = [] A : Any = [] for i, rect in enumerate(lowerCamelCase__ ): rect.set_stroke(lowerCamelCase__ ) A : Any = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__, opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=lowerCamelCase__, buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1], direction=lowerCamelCase__, buff=0.0 ) self.add(lowerCamelCase__ ) model_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ ) A : int = [mem.copy() for i in range(6 )] A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : str = Text("""Loaded Checkpoint""", font_size=24 ) A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCamelCase__ ) A : Optional[int] = [] A : List[Any] = [] for i, rect in enumerate(lowerCamelCase__ ): A : int = fill.copy().set_fill(lowerCamelCase__, opacity=0.7 ) target.move_to(lowerCamelCase__ ) ckpt_arr.append(lowerCamelCase__ ) A : List[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__, *lowerCamelCase__ ) A : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A : List[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase__, lowerCamelCase__ ) A : Union[str, Any] = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, ) blue_text.next_to(lowerCamelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() ) 
self.add(lowerCamelCase__ ) A : List[str] = MarkupText( f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''', font_size=24, ) step_a.move_to([2, 2, 0] ) A : List[str] = [meta_mem.copy() for i in range(6 )] A : List[Any] = [meta_mem.copy() for i in range(6 )] A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Dict = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Optional[Any] = Text("""Disk""", font_size=24 ) A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowerCamelCase__, run_time=3 ), Write(lowerCamelCase__, run_time=1 ), Create(lowerCamelCase__, run_time=1 ) ) A : str = [] for i, rect in enumerate(lowerCamelCase__ ): A : Optional[Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCamelCase__, run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(FadeOut(lowerCamelCase__ ) ) A : List[str] = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''', font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__, run_time=3 ) ) self.play( FadeOut(lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ ), ) self.wait()
116
0
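The longt5 __init__ above relies on transformers' internal _LazyModule. A simplified sketch of the underlying idea, not the actual implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")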
'''simple docstring'''

from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set the components of a module as attributes of this object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, e.g. `os.path.join` inside another module."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Deactivate a patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
31
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowercase: str = logging.get_logger(__name__) __lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(_UpperCamelCase ): if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ): if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' "is an empty dataset dictionary." ) raise ValueError( F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' ) if i == 0: UpperCamelCase__ , UpperCamelCase__ = ( (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase ) else: return _interleave_iterable_datasets( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType: '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(_UpperCamelCase ): if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ): if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' "is an empty dataset dictionary." 
) raise ValueError( F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' ) if i == 0: UpperCamelCase__ , UpperCamelCase__ = ( (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase ) else: return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
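A hedged usage sketch for the two combinators above, assuming they back the public `interleave_datasets` / `concatenate_datasets` API of the `datasets` library; the toy data is illustrative only.

from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a1", "a2", "a3"]})
ds_b = Dataset.from_dict({"text": ["b1"]})

# Sample 70/30 from the two sources; "all_exhausted" keeps drawing (with
# oversampling) until every dataset has been seen in full at least once.
mixed = interleave_datasets(
    [ds_a, ds_b], probabilities=[0.7, 0.3], seed=42, stopping_strategy="all_exhausted"
)

# Row-wise concatenation (axis=0 is the default; axis=1 would join columns).
joined = concatenate_datasets([ds_a, ds_b])
print(mixed["text"], joined["text"])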
31
1
import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def A_ ( a , a , a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = RemBertConfig.from_json_file(a_ ) print('Building PyTorch model from configuration: {}'.format(str(a_ ) ) ) SCREAMING_SNAKE_CASE_ : Any = RemBertModel(a_ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(a_ , a_ , a_ ) # Save pytorch-model print('Save PyTorch model to {}'.format(a_ ) ) torch.save(model.state_dict() , a_ ) if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
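A hedged example invocation of the conversion script above, driven from Python to stay in one language; the script filename and all paths are hypothetical placeholders, while the flags mirror the argparse definition above.

import subprocess

subprocess.run(
    [
        "python", "convert_rembert_tf_checkpoint_to_pytorch.py",  # assumed filename
        "--tf_checkpoint_path", "/tmp/rembert/model.ckpt",        # placeholder
        "--rembert_config_file", "/tmp/rembert/config.json",      # placeholder
        "--pytorch_dump_path", "/tmp/rembert/pytorch_model.bin",  # placeholder
    ],
    check=True,
)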
253
'''simple docstring''' import math def __UpperCAmelCase ( a_: int ): return math.sqrt(a_ ) * math.sqrt(a_ ) == num def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Dict = 0 _UpperCAmelCase : List[str] = n while left <= right: _UpperCAmelCase : Dict = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: _UpperCAmelCase : int = mid - 1 else: _UpperCAmelCase : Tuple = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
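A standalone re-derivation of the binary-search check above with readable names (the float-sqrt variant is prone to rounding error for large inputs); illustrative only.

def is_perfect_square(n: int) -> bool:
    # Binary-search the integer square root instead of trusting float sqrt.
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert is_perfect_square(49) and not is_perfect_square(50)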
145
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): def __init__( self : str , a__ : int , a__ : Dict ): super().__init__() # make sure scheduler can always be converted to DDIM __magic_name__ = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple , a__ : int = 1 , a__ : Dict = None , a__ : int = 0.0 , a__ : List[str] = 50 , a__ : Tuple = None , a__ : Any = "pil" , a__ : List[str] = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , A_ ): __magic_name__ = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: __magic_name__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(A_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __magic_name__ = randn_tensor(A_ , generator=A_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __magic_name__ = self.unet(A_ , A_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __magic_name__ = self.scheduler.step( A_ , A_ , A_ , eta=A_ , use_clipped_model_output=A_ , generator=A_ ).prev_sample __magic_name__ = (image / 2 + 0.5).clamp(0 , 1 ) __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __magic_name__ = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
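A hedged usage sketch, assuming the class above corresponds to diffusers' `DDIMPipeline` and that the public DDPM CIFAR-10 checkpoint can be loaded through it; keyword names mirror the `__call__` signature above.

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
gen = torch.Generator().manual_seed(0)
# eta=0.0 gives deterministic DDIM sampling; 50 denoising steps.
image = pipe(batch_size=1, generator=gen, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")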
371
'''simple docstring''' from random import randint, random def UpperCamelCase ( a , a , a , a = False , a = False , a = 5 , ) -> list: '''simple docstring''' __magic_name__ = [[-1] * number_of_cells] # Create a highway without any car __magic_name__ = 0 __magic_name__ = max(a , 0 ) while i < number_of_cells: __magic_name__ = ( randint(0 , a ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def UpperCamelCase ( a , a ) -> int: '''simple docstring''' __magic_name__ = 0 __magic_name__ = highway_now[car_index + 1 :] for cell in range(len(a ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(a , -1 ) def UpperCamelCase ( a , a , a ) -> list: '''simple docstring''' __magic_name__ = len(a ) # Before calculations, the highway is empty __magic_name__ = [-1] * number_of_cells for car_index in range(a ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed __magic_name__ = min(highway_now[car_index] + 1 , a ) # Number of empty cell before the next car __magic_name__ = get_distance(a , a ) - 1 # We can't have the car causing an accident __magic_name__ = min(next_highway[car_index] , a ) if random() < probability: # Randomly, a driver will slow down __magic_name__ = max(next_highway[car_index] - 1 , 0 ) return next_highway def UpperCamelCase ( a , a , a , a ) -> list: '''simple docstring''' __magic_name__ = len(highway[0] ) for i in range(a ): __magic_name__ = update(highway[i] , a , a ) __magic_name__ = [-1] * number_of_cells for car_index in range(a ): __magic_name__ = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) __magic_name__ = (car_index + speed) % number_of_cells # Commit the change of position __magic_name__ = speed highway.append(a ) return highway if __name__ == "__main__": import doctest doctest.testmod()
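A small hedged driver for the Nagel-Schreckenberg-style simulation above. The four functions share one obfuscated name, so `construct_highway` and `simulate` are the presumed original names and this call pattern is an assumption, not a verified API.

from pprint import pprint

# 21 cells, a car every 3 cells, all starting at speed 1 (positional args
# follow the parameter order visible in the definitions above).
highway = construct_highway(21, 3, 1)      # assumed name
# 5 update steps, 10% chance per car of a random slowdown, max speed 5.
history = simulate(highway, 5, 0.1, 5)     # assumed name
pprint(history)  # one list per time step: a speed per cell, or -1 if empty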
98
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """spiece.model"""} UpperCAmelCase = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } UpperCAmelCase = {"""bert_for_seq_generation""": 512} class UpperCAmelCase_ ( _lowercase): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = [] snake_case__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : Optional[int]="<s>" , __UpperCamelCase : Optional[Any]="</s>" , __UpperCamelCase : Optional[Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : int="<::::>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Any , ) -> None: _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCamelCase ) @property def _UpperCamelCase ( self : Optional[int] ) -> Tuple: return self.sp_model.get_piece_size() def _UpperCamelCase ( self : int ) -> Optional[int]: _UpperCamelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , __UpperCamelCase : Any ) -> Tuple: _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> List[str]: return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Any ) -> Optional[int]: return self.sp_model.piece_to_id(__UpperCamelCase ) def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[int] ) -> Optional[Any]: _UpperCamelCase = self.sp_model.IdToPiece(__UpperCamelCase ) return token def _UpperCamelCase ( self : str , __UpperCamelCase : Dict ) -> Optional[Any]: _UpperCamelCase = [] _UpperCamelCase = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__UpperCamelCase ) + token _UpperCamelCase = [] else: current_sub_tokens.append(__UpperCamelCase ) out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def _UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCamelCase = 
os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
256
"""simple docstring""" import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) UpperCAmelCase = logging.getLogger() def lowercase ( ) -> List[str]: _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def lowercase ( a__ : List[Any] ) -> Optional[Any]: _UpperCamelCase = {} _UpperCamelCase = os.path.join(a__ , '''all_results.json''' ) if os.path.exists(a__ ): with open(a__ , '''r''' ) as f: _UpperCamelCase = json.load(a__ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def lowercase ( ) -> str: _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() UpperCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCAmelCase_ ( _lowercase): @classmethod def _UpperCamelCase ( cls : Any ) -> List[Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def _UpperCamelCase ( cls : int ) -> str: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : str ) -> Dict: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : List[str] ) -> Tuple: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : List[str] ) -> str: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : Optional[Any] ) -> str: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : int ) -> Optional[Any]: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : str ) -> str: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''translation_no_trainer''' ) ) ) @slow def _UpperCamelCase ( self : Any ) -> List[Any]: _UpperCamelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(__UpperCamelCase ) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]: _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(__UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''image_classification_no_trainer''' ) ) )
256
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}, "tokenizer_file": { "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json" }, } __A = {"mobilebert-uncased": 5_1_2} __A = {} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = VOCAB_FILES_NAMES _UpperCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Dict = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Dict = MobileBertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__: Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCAmelCase ) != tokenize_chinese_chars ): lowercase__: Optional[Any] = getattr(_UpperCAmelCase , normalizer_state.pop('''type''' ) ) lowercase__: Any = do_lower_case lowercase__: List[Any] = strip_accents lowercase__: List[Any] = tokenize_chinese_chars lowercase__: Tuple = normalizer_class(**_UpperCAmelCase ) lowercase__: Dict = do_lower_case def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None ): lowercase__: Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Union[str, Any] = [self.sep_token_id] lowercase__: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase )
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=None , ) -> Any: lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size def _snake_case ( self ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class lowercase ( _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = MobileNetVaImageProcessor if is_vision_available() else None def _snake_case ( self ) -> List[str]: lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def _snake_case ( self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ) -> Dict: lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , """do_resize""" ) ) self.assertTrue(hasattr(lowercase , """size""" ) ) self.assertTrue(hasattr(lowercase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowercase , """crop_size""" ) ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def _snake_case ( self ) -> Dict: pass def _snake_case ( self ) -> Optional[int]: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , 
) def _snake_case ( self ) -> Any: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _snake_case ( self ) -> Tuple: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
46
import math def lowerCamelCase__ ( __lowerCAmelCase : int ): """simple docstring""" lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 while num > 0: lowerCAmelCase_ = num % 8 lowerCAmelCase_ = octal + (remainder * math.floor(math.pow(10 , __lowerCAmelCase ) )) counter += 1 lowerCAmelCase_ = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return F"""0o{int(__lowerCAmelCase )}""" def lowerCamelCase__ ( ): """simple docstring""" print("\n2 in octal is:" ) print(decimal_to_octal(2 ) ) # = 2 print("\n8 in octal is:" ) print(decimal_to_octal(8 ) ) # = 10 print("\n65 in octal is:" ) print(decimal_to_octal(65 ) ) # = 101 print("\n216 in octal is:" ) print(decimal_to_octal(216 ) ) # = 330 print("\n512 in octal is:" ) print(decimal_to_octal(512 ) ) # = 1000 print("\n" ) if __name__ == "__main__": main()
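A quick standalone cross-check of the conversion above against Python's built-in `oct()`, using integer arithmetic instead of `math.pow`; names are illustrative.

def decimal_to_octal_str(num: int) -> str:
    # Peel off base-8 digits and stack them as decimal digits of `octal`.
    octal, counter = 0, 0
    while num > 0:
        remainder = num % 8
        octal += remainder * 10**counter
        counter += 1
        num //= 8
    return f"0o{octal}"

for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal_str(n) == oct(n), n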
231
0
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union _a = TypeVar('T') _a = Union[List[T], Tuple[T, ...]] _a = Union[T, List[T], Dict[str, T]] _a = Union[str, bytes, os.PathLike]
23
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
1
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str: lowerCamelCase__ : Optional[int] = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]: lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) lowerCamelCase__ : Dict = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str: lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' ) lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model'] lowerCamelCase__ : Optional[int] = mam_aaa['model'] remove_ignore_keys_(_UpperCAmelCase ) lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0] lowerCamelCase__ : Union[str, Any] = MaMaaaConfig( vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , ) lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight'] lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase ) model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") _UpperCAmelCase : str = parser.parse_args() _UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
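A hedged invocation of the M2M100 conversion script above, driven from Python; the script filename and both paths are placeholders, and the positional arguments mirror the argparse definition above.

import subprocess

subprocess.run(
    [
        "python", "convert_m2m100_original_checkpoint_to_pytorch.py",  # assumed name
        "/tmp/m2m100/model.pt",    # fairseq_path (placeholder)
        "/tmp/m2m100_converted",   # pytorch_dump_folder_path (placeholder)
    ],
    check=True,
)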
50
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : List[str] = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = """informer""" _SCREAMING_SNAKE_CASE = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str = "student_t" , SCREAMING_SNAKE_CASE_ : str = "nll" , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : List[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : int = 6_4 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : int = 1_0_0 , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str = "prob" , SCREAMING_SNAKE_CASE_ : int = 5 , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : int , ): # time series specific configuration lowerCAmelCase_ : Dict = prediction_length lowerCAmelCase_ : List[str] = context_length or prediction_length lowerCAmelCase_ : List[Any] = distribution_output lowerCAmelCase_ : int = loss lowerCAmelCase_ : Optional[int] = input_size lowerCAmelCase_ : Tuple = num_time_features lowerCAmelCase_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCAmelCase_ : int = scaling lowerCAmelCase_ : List[Any] = num_dynamic_real_features lowerCAmelCase_ : Union[str, Any] = num_static_real_features lowerCAmelCase_ : Optional[int] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) lowerCAmelCase_ : str = cardinality else: lowerCAmelCase_ : Any = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) lowerCAmelCase_ : Optional[int] = embedding_dimension else: lowerCAmelCase_ : Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCAmelCase_ : Optional[int] = num_parallel_samples # Transformer architecture configuration 
lowerCAmelCase_ : Any = input_size * len(self.lags_sequence ) + self._number_of_features lowerCAmelCase_ : Any = d_model lowerCAmelCase_ : Union[str, Any] = encoder_attention_heads lowerCAmelCase_ : Optional[Any] = decoder_attention_heads lowerCAmelCase_ : Any = encoder_ffn_dim lowerCAmelCase_ : List[str] = decoder_ffn_dim lowerCAmelCase_ : Optional[Any] = encoder_layers lowerCAmelCase_ : Tuple = decoder_layers lowerCAmelCase_ : Optional[int] = dropout lowerCAmelCase_ : Dict = attention_dropout lowerCAmelCase_ : int = activation_dropout lowerCAmelCase_ : Dict = encoder_layerdrop lowerCAmelCase_ : str = decoder_layerdrop lowerCAmelCase_ : Union[str, Any] = activation_function lowerCAmelCase_ : Union[str, Any] = init_std lowerCAmelCase_ : Union[str, Any] = use_cache # Informer lowerCAmelCase_ : Optional[int] = attention_type lowerCAmelCase_ : Any = sampling_factor lowerCAmelCase_ : int = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def SCREAMING_SNAKE_CASE__ ( self : Any ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
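A hedged instantiation sketch for the configuration above, assuming it maps to transformers' `InformerConfig`; the values are arbitrary and only meant to show the time-series-specific knobs.

from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,    # forecast horizon
    context_length=48,       # encoder window (defaults to prediction_length)
    input_size=1,            # univariate target series
    attention_type="prob",   # Informer's ProbSparse attention, per the default above
    sampling_factor=5,
)
print(config.d_model, config.lags_sequence)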
224
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' _A = XGLMConfig _A = {} _A = 'gelu' def __init__( self :Tuple , a :Dict , a :List[str]=1_4 , a :Optional[int]=7 , a :List[Any]=True , a :Dict=True , a :Any=True , a :Dict=9_9 , a :Union[str, Any]=3_2 , a :List[str]=2 , a :Union[str, Any]=4 , a :Optional[int]=3_7 , a :Optional[int]="gelu" , a :Union[str, Any]=0.1 , a :List[str]=0.1 , a :Optional[Any]=5_1_2 , a :Optional[Any]=0.02 , ) -> Optional[int]: __UpperCamelCase : Any = parent __UpperCamelCase : Dict = batch_size __UpperCamelCase : Optional[Any] = seq_length __UpperCamelCase : int = is_training __UpperCamelCase : List[Any] = use_input_mask __UpperCamelCase : Tuple = use_labels __UpperCamelCase : Tuple = vocab_size __UpperCamelCase : Optional[Any] = d_model __UpperCamelCase : Tuple = num_hidden_layers __UpperCamelCase : Tuple = num_attention_heads __UpperCamelCase : Optional[Any] = ffn_dim __UpperCamelCase : int = activation_function __UpperCamelCase : Dict = activation_dropout __UpperCamelCase : List[str] = attention_dropout __UpperCamelCase : Optional[int] = max_position_embeddings __UpperCamelCase : Union[str, Any] = initializer_range __UpperCamelCase : Optional[Any] = None __UpperCamelCase : Any = 0 __UpperCamelCase : List[Any] = 2 __UpperCamelCase : int = 1 def _lowerCamelCase ( self :List[Any] ) -> List[str]: return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowerCamelCase ( self :Any ) -> List[str]: __UpperCamelCase : List[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __UpperCamelCase : List[str] = None if self.use_input_mask: __UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : int = self.get_config() __UpperCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCamelCase ( self :Optional[Any] ) -> List[Any]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a , ) def _lowerCamelCase ( self :str ) -> Optional[Any]: __UpperCamelCase : Dict = self.prepare_config_and_inputs() ( __UpperCamelCase ) : List[str] = config_and_inputs __UpperCamelCase : Any = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase): '''simple docstring''' _A = (TFXGLMModel, 
TFXGLMForCausalLM) if is_tf_available() else () _A = (TFXGLMForCausalLM,) if is_tf_available() else () _A = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) _A = False _A = False _A = False def _lowerCamelCase ( self :Any ) -> Any: __UpperCamelCase : Optional[int] = TFXGLMModelTester(self ) __UpperCamelCase : Optional[int] = ConfigTester(self , config_class=a , n_embd=3_7 ) def _lowerCamelCase ( self :Any ) -> Dict: self.config_tester.run_common_tests() @slow def _lowerCamelCase ( self :Dict ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : Any = TFXGLMModel.from_pretrained(a ) self.assertIsNotNone(a ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowerCamelCase ( self :List[Any] ) -> Any: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @slow def _lowerCamelCase ( self :str , a :int=True ) -> Union[str, Any]: __UpperCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __UpperCamelCase : Union[str, Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __UpperCamelCase : Optional[int] = model.generate(a , do_sample=a , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , a ) @slow def _lowerCamelCase ( self :Dict ) -> Optional[Any]: __UpperCamelCase : Any = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __UpperCamelCase : Optional[Any] = tokenizer("Today is a nice day and" , return_tensors="tf" ) __UpperCamelCase : Tuple = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __UpperCamelCase : int = model.generate(a , do_sample=a , seed=[7, 0] ) __UpperCamelCase : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=a ) __UpperCamelCase : Dict = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(a , a ) @slow def _lowerCamelCase ( self :Any ) -> List[str]: __UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : Optional[int] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : int = "left" # use different length sentences to test batching __UpperCamelCase : Dict = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __UpperCamelCase : List[str] = tokenizer(a , return_tensors="tf" , padding=a ) __UpperCamelCase : Any = inputs["input_ids"] __UpperCamelCase : Optional[Any] = model.generate(input_ids=a , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) __UpperCamelCase : Dict = tokenizer(sentences[0] , return_tensors="tf" ).input_ids __UpperCamelCase : Union[str, Any] = model.generate(input_ids=a , max_new_tokens=1_2 ) __UpperCamelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids __UpperCamelCase : Union[str, Any] = model.generate(input_ids=a , max_new_tokens=1_2 ) __UpperCamelCase : Tuple = tokenizer.batch_decode(a , skip_special_tokens=a ) __UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a ) __UpperCamelCase : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=a ) __UpperCamelCase : List[Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(a , a ) self.assertListEqual(a , [non_padded_sentence, padded_sentence] )
371
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCamelCase__ : '''simple docstring''' _A = 42 _A = 42 class lowerCamelCase__ : '''simple docstring''' def __init__( self :Optional[Any] , a :int ) -> Tuple: __UpperCamelCase : list[list[Edge]] = [[] for _ in range(a )] __UpperCamelCase : str = size def __getitem__( self :str , a :int ) -> Iterator[Edge]: return iter(self._graph[vertex] ) @property def _lowerCamelCase ( self :Any ) -> List[str]: return self._size def _lowerCamelCase ( self :Dict , a :int , a :int , a :int ) -> Any: if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1." ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size)." ) self._graph[from_vertex].append(Edge(a , a ) ) def _lowerCamelCase ( self :List[str] , a :int , a :int ) -> int | None: __UpperCamelCase : Union[str, Any] = deque([start_vertex] ) __UpperCamelCase : list[int | None] = [None] * self.size __UpperCamelCase : Dict = 0 while queue: __UpperCamelCase : Tuple = queue.popleft() __UpperCamelCase : int = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: __UpperCamelCase : Optional[Any] = current_distance + edge.weight __UpperCamelCase : Dict = distances[edge.destination_vertex] if ( isinstance(a , a ) and new_distance >= dest_vertex_distance ): continue __UpperCamelCase : Optional[Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex." ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
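A compact standalone sketch of the 0-1 BFS idea the class above implements: a deque where 0-weight edges push to the front and 1-weight edges to the back; names are illustrative.

from collections import deque

def zero_one_bfs(adj, start: int, goal: int) -> int:
    # adj[u] holds (weight, v) pairs with weight restricted to {0, 1}.
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for w, v in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)  # free edge: explore immediately
                else:
                    dq.append(v)      # unit edge: explore later
    if dist[goal] is None:
        raise ValueError("No path from start_vertex to finish_vertex.")
    return dist[goal]

# 0 --1--> 1 --0--> 2 : shortest 0->2 distance is 1
assert zero_one_bfs([[(1, 1)], [(0, 2)], []], 0, 2) == 1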
151
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Tuple ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] =[ '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WavLMForAudioFrameClassification''', '''WavLMForCTC''', '''WavLMForSequenceClassification''', '''WavLMForXVector''', '''WavLMModel''', '''WavLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys a__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __UpperCamelCase : List[str] = '''true''' def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=16 ): set_seed(42 ) lowerCAmelCase__ : Union[str, Any] = RegressionModel() lowerCAmelCase__ : Optional[int] = deepcopy(A_ ) lowerCAmelCase__ : Any = RegressionDataset(length=A_ ) lowerCAmelCase__ : List[str] = DataLoader(A_ , batch_size=A_ ) model.to(accelerator.device ) lowerCAmelCase__ ,lowerCAmelCase__ : Dict = accelerator.prepare(A_ , A_ ) return model, ddp_model, dataloader def __SCREAMING_SNAKE_CASE ( A_ , A_=False ): lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) lowerCAmelCase__ : List[str] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(A_ ): lowerCAmelCase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ ) return outputs with accelerator.main_process_first(): lowerCAmelCase__ : Dict = dataset.map( A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(A_ ): if use_longest: return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(A_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 ) def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Union[str, Any] = Accelerator(dispatch_batches=A_ , split_batches=A_ ) lowerCAmelCase__ : str = get_dataloader(A_ , not dispatch_batches ) lowerCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(A_ , A_ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : Union[str, Any] = [] for batch in dataloader: lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = batch.values() with torch.no_grad(): lowerCAmelCase__ : List[str] = model(A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : str = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowerCAmelCase__ ,lowerCAmelCase__ : int = [], [] for logit, targ in logits_and_targets: logits.append(A_ ) targs.append(A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = torch.cat(A_ ), torch.cat(A_ ) return logits, targs def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=False , A_=False , A_=16 ): lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = get_basic_setup(A_ , A_ , A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = generate_predictions(A_ , A_ , A_ ) assert ( len(A_ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}' def __SCREAMING_SNAKE_CASE ( A_ = False , A_ = False ): lowerCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) lowerCAmelCase__ 
,lowerCAmelCase__ : Optional[int] = get_mrpc_setup(A_ , A_ ) # First do baseline lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = setup['''no'''] model.to(A_ ) model.eval() for batch in dataloader: batch.to(A_ ) with torch.inference_mode(): lowerCAmelCase__ : Optional[int] = model(**A_ ) lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=A_ , references=batch['''labels'''] ) lowerCAmelCase__ : Dict = metric.compute() # Then do distributed lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): lowerCAmelCase__ : Union[str, Any] = model(**A_ ) lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ : int = batch['''labels'''] lowerCAmelCase__ ,lowerCAmelCase__ : int = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=A_ , references=A_ ) lowerCAmelCase__ : List[Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : List[str] = Accelerator(split_batches=A_ , dispatch_batches=A_ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(A_ , A_ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowerCAmelCase__ : Optional[Any] = Accelerator(split_batches=A_ , dispatch_batches=A_ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(A_ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) lowerCAmelCase__ : List[str] = Accelerator() test_torch_metrics(A_ , 5_12 ) accelerator.state._reset_state() def __SCREAMING_SNAKE_CASE ( A_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
106
0
"""simple docstring""" from collections.abc import Callable def lowercase ( _SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ): '''simple docstring''' _UpperCAmelCase = a _UpperCAmelCase = b if function(_SCREAMING_SNAKE_CASE ) == 0: # one of the a or b is a root for the function return a elif function(_SCREAMING_SNAKE_CASE ) == 0: return b elif ( function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: _UpperCAmelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_SCREAMING_SNAKE_CASE ) == 0: return mid elif function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) < 0: _UpperCAmelCase = mid else: _UpperCAmelCase = mid _UpperCAmelCase = start + (end - start) / 2.0 return mid def lowercase ( _SCREAMING_SNAKE_CASE : float ): '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
326
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
326
1
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
111
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __snake_case ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase__ ( self : Any , A : Dict , A : Any ): return f'''gaussian_noise_s={seed}_shape={'_'.join([str(A ) for s in shape] )}.npy''' def UpperCAmelCase__ ( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[int]=0 , A : Tuple=(4, 4, 64, 64) , A : Tuple=False ): __snake_case: Dict = jnp.bfloataa if fpaa else jnp.floataa __snake_case: str = jnp.array(load_hf_numpy(self.get_file_format(A , A ) ) , dtype=A ) return image def UpperCAmelCase__ ( self : Union[str, Any] , A : Any=False , A : Optional[Any]="CompVis/stable-diffusion-v1-4" ): __snake_case: List[Any] = jnp.bfloataa if fpaa else jnp.floataa __snake_case: Union[str, Any] = """bf16""" if fpaa else None __snake_case , __snake_case: Optional[int] = FlaxUNetaDConditionModel.from_pretrained( A , subfolder="""unet""" , dtype=A , revision=A ) return model, params def UpperCAmelCase__ ( self : Tuple , A : Tuple=0 , A : str=(4, 77, 768) , A : List[str]=False ): __snake_case: Any = jnp.bfloataa if fpaa else jnp.floataa __snake_case: Dict = jnp.array(load_hf_numpy(self.get_file_format(A , A ) ) , dtype=A ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Any] , A : str , A : Any ): __snake_case , __snake_case: Union[str, Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=A ) __snake_case: Tuple = self.get_latents(A , fpaa=A ) __snake_case: int = self.get_encoder_hidden_states(A , fpaa=A ) __snake_case: List[Any] = model.apply( {"""params""": params} , A , jnp.array(A , dtype=jnp.intaa ) , encoder_hidden_states=A , ).sample assert sample.shape == latents.shape __snake_case: str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __snake_case: Optional[int] = jnp.array(A , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(A , A , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def UpperCAmelCase__ ( self : Optional[Any] , A : int , A : Tuple , A : List[str] ): __snake_case , __snake_case: Union[str, Any] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=A ) __snake_case: Optional[int] = self.get_latents(A , shape=(4, 4, 96, 96) , fpaa=A ) __snake_case: str = self.get_encoder_hidden_states(A , shape=(4, 77, 1_024) , fpaa=A ) __snake_case: str = model.apply( 
{"""params""": params} , A , jnp.array(A , dtype=jnp.intaa ) , encoder_hidden_states=A , ).sample assert sample.shape == latents.shape __snake_case: Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __snake_case: Any = jnp.array(A , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(A , A , atol=1E-2 )
111
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = "▁" _A = {"vocab_file": "prophetnet.tokenizer"} _A = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } _A = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } _A = { "microsoft/xprophetnet-large-wiki100-cased": 5_12, } def lowercase_ ( A__ ) -> List[Any]: """simple docstring""" snake_case = collections.OrderedDict() with open(A__ , "r" , encoding="utf-8" ) as reader: snake_case = reader.readlines() for index, token in enumerate(A__ ): snake_case = token.rstrip("\n" ) snake_case = index return vocab class lowerCamelCase ( A_ ): UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"] def __init__(self : Dict , _A : Tuple , _A : Union[str, Any]="[SEP]" , _A : Optional[Any]="[SEP]" , _A : List[Any]="[SEP]" , _A : Any="[UNK]" , _A : Tuple="[PAD]" , _A : Tuple="[CLS]" , _A : str="[MASK]" , _A : Optional[Dict[str, Any]] = None , **_A : Optional[int] , ) -> None: snake_case = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) snake_case = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab snake_case = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(1_0 ): snake_case = f'[unused{i}]' snake_case = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab snake_case = 1_2 snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_A ) def __getstate__(self : Tuple ) -> int: snake_case = self.__dict__.copy() snake_case = None return state def __setstate__(self : Union[str, Any] , _A : Any ) -> Optional[Any]: snake_case = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): snake_case = {} snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase(self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return ([0] * len(_A )) + [1] return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1] def UpperCAmelCase(self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: snake_case = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase(self : Any ) -> Tuple: return len(self.sp_model ) + self.fairseq_offset def UpperCAmelCase(self : Optional[Any] ) -> List[str]: snake_case = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase(self : Optional[Any] , _A : str ) -> str: return self.sp_model.encode(_A , out_type=_A ) def UpperCAmelCase(self : Tuple , _A : Union[str, Any] ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase(self : Union[str, Any] , _A : str ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase(self : List[str] , _A : Dict ) -> Union[str, Any]: snake_case = "".join(_A ).replace(_A , " " ).strip() return out_string def UpperCAmelCase(self : List[str] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(_A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return snake_case = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , "wb" ) as fi: snake_case = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def UpperCAmelCase(self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return 
token_ids_a + [self.sep_token_id] snake_case = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
137
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    # Dummy stand-in that raises an informative error when the `onnx` backend is unavailable.
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
137
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys UpperCAmelCase__ = 
_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
289
from math import isqrt, log10


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    # Any fixed logarithm base works here, as long as it is used consistently on both
    # sides of the comparison below.
    upper_bound = degree * log10(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left])
            + prime_numbers[left] * log10(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
325
0
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
369
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch UpperCamelCase : Dict = logging.get_logger(__name__) class UpperCamelCase ( a_ ): """simple docstring""" A : Any = ["pixel_values"] def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ): """simple docstring""" super().__init__(**UpperCAmelCase_) a : str = size if size is not None else {'shortest_edge': 2_5_6} a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_) a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size') a : Any = do_resize a : List[str] = size a : Union[str, Any] = resample a : int = do_center_crop a : Optional[int] = crop_size a : Tuple = do_rescale a : int = rescale_factor a : Optional[Any] = do_normalize a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ): """simple docstring""" a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_) return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ): """simple docstring""" a : List[str] = get_size_dict(UpperCAmelCase_) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""") return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]): """simple docstring""" return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ): """simple docstring""" return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ): """simple docstring""" a : int = do_resize if do_resize is not None else self.do_resize a : int = size if size is not None else self.size a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_) a : str = resample if resample is not None else self.resample a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size') a : str = do_rescale if do_rescale is not None else self.do_rescale a : int = rescale_factor if rescale_factor is not None else self.rescale_factor a : str = do_normalize if do_normalize is not None else self.do_normalize a : List[str] = image_mean if image_mean is not None else self.image_mean a : Optional[int] = image_std if image_std is not None else self.image_std a : Dict = make_list_of_images(UpperCAmelCase_) if not valid_images(UpperCAmelCase_): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. 
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images] if do_resize: a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images] if do_center_crop: a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images] if do_rescale: a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images] if do_normalize: a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images] a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images] a : List[str] = {'pixel_values': images} return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None): """simple docstring""" a : Dict = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_) != len(UpperCAmelCase_): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits') if is_torch_tensor(UpperCAmelCase_): a : Optional[Any] = target_sizes.numpy() a : List[str] = [] for idx in range(len(UpperCAmelCase_)): a : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_) a : Union[str, Any] = resized_logits[0].argmax(dim=0) semantic_segmentation.append(UpperCAmelCase_) else: a : Optional[int] = logits.argmax(dim=1) a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
345
0
"""simple docstring""" from math import sqrt def a__ ( snake_case__ ) -> int: lowerCamelCase = 0 for i in range(1 , int(sqrt(snake_case__ ) + 1 ) ): if n % i == 0 and i != sqrt(snake_case__ ): total += i + n // i elif i == sqrt(snake_case__ ): total += i return total - n def a__ ( snake_case__ = 1_00_00 ) -> int: lowerCamelCase = sum( i for i in range(1 , snake_case__ ) if sum_of_divisors(sum_of_divisors(snake_case__ ) ) == i and sum_of_divisors(snake_case__ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
291
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
1
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[Any] )-> List[Any]: '''simple docstring''' super().tearDown() gc.collect() def snake_case__ ( self : List[Any] )-> Dict: '''simple docstring''' A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained( 'stabilityai/stable-diffusion-2',revision='bf16',dtype=jnp.bfloataa,) A__ = 'A painting of a squirrel eating a burger' A__ = jax.device_count() A__ = num_samples * [prompt] A__ = sd_pipe.prepare_inputs(lowercase_ ) A__ = replicate(lowercase_ ) A__ = shard(lowercase_ ) A__ = jax.random.PRNGKey(0 ) A__ = jax.random.split(lowercase_,jax.device_count() ) A__ = sd_pipe(lowercase_,lowercase_,lowercase_,num_inference_steps=2_5,jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' A__ = 'stabilityai/stable-diffusion-2' A__ , A__ = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_,subfolder='scheduler' ) A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained( lowercase_,scheduler=lowercase_,revision='bf16',dtype=jnp.bfloataa,) A__ = scheduler_params A__ = 'A painting of a squirrel eating a burger' A__ = jax.device_count() A__ = num_samples * [prompt] A__ = sd_pipe.prepare_inputs(lowercase_ ) A__ = replicate(lowercase_ ) A__ = shard(lowercase_ ) A__ = jax.random.PRNGKey(0 ) A__ = jax.random.split(lowercase_,jax.device_count() ) A__ = sd_pipe(lowercase_,lowercase_,lowercase_,num_inference_steps=2_5,jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
282
def solution(n: int = 1000) -> int:
    """Sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: multiples of 15 already match the branch above
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
282
1
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=A ) class lowerCamelCase__ ( A ): """simple docstring""" __a = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __a = Features({"""text""": Value("""string""" )} ) __a = Features({"""summary""": Value("""string""" )} ) __a = "text" __a = "summary" @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return {self.text_column: "text", self.summary_column: "summary"}
115
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""note_seq"""] def __init__( self : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(self , ["""note_seq"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : str ): '''simple docstring''' requires_backends(cls , ["""note_seq"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["""note_seq"""] )
115
1
'''Lazy import structure for the FocalNet model.'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
366
'''Output class and conditional import for the Semantic Stable Diffusion pipeline.'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
217
0
'''Download an Instagram video/IGTV clip via downloadgram.net.'''
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"

    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
297
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Recursive factorial with memoization."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
114
0
"""simple docstring""" import sys from collections import defaultdict class A_ : '''simple docstring''' def __init__( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = [] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.node_position[vertex] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = pos def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase_ : List[Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase_ : Dict = 2 * start + 1 else: UpperCAmelCase_ : List[str] = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase_ : Tuple = heap[smallest_child], positions[smallest_child] UpperCAmelCase_ : List[str] = ( heap[start], positions[start], ) UpperCAmelCase_ : int = temp, tempa UpperCAmelCase_ : Any = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , lowercase_ ) self.top_to_bottom(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = position[index] while index != 0: UpperCAmelCase_ : str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCAmelCase_ : Any = heap[parent] UpperCAmelCase_ : Tuple = position[parent] self.set_position(position[parent] , lowercase_ ) else: UpperCAmelCase_ : Optional[int] = val UpperCAmelCase_ : Tuple = temp self.set_position(lowercase_ , lowercase_ ) break UpperCAmelCase_ : Dict = parent else: UpperCAmelCase_ : Optional[Any] = val UpperCAmelCase_ : Tuple = temp self.set_position(lowercase_ , 0 ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = len(lowercase_ ) // 2 - 1 for i in range(lowercase_ , -1 , -1 ): self.top_to_bottom(lowercase_ , lowercase_ , len(lowercase_ ) , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = positions[0] UpperCAmelCase_ : Dict = sys.maxsize self.top_to_bottom(lowercase_ , 0 , len(lowercase_ ) , lowercase_ ) return temp def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = Heap() UpperCAmelCase_ : List[str] = [0] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [-1] * len(__lowerCamelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase_ : Any = [] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase_ : Optional[Any] = [] for vertex in range(len(__lowerCamelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__lowerCamelCase ) heap.node_position.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Tuple = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = distance heap.heapify(__lowerCamelCase, __lowerCamelCase ) for _ in range(1, len(__lowerCamelCase ) ): UpperCAmelCase_ : List[str] = heap.delete_minimum(__lowerCamelCase, __lowerCamelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase_ : Optional[Any] = 1 for neighbor, distance in adjacency_list[vertex]: 
if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__lowerCamelCase )] ): UpperCAmelCase_ : Any = distance heap.bottom_to_top( __lowerCamelCase, heap.get_position(__lowerCamelCase ), __lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Tuple = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _a = int(input('Enter number of edges: ').strip()) _a = defaultdict(list) for _ in range(edges_number): _a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
351
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
0
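# The test above drives the example script by patching sys.argv; a
# self-contained sketch of that pattern (main() here is a stand-in, not the
# real run_glue_deebert.main).
import sys
from unittest.mock import patch


def main():
    # a CLI-style entry point that reads its arguments from sys.argv
    return {"acc": float(sys.argv[1])}


with patch.object(sys, "argv", ["prog", "0.9"]):
    assert main() == {"acc": 0.9}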
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A : Union[str, Any] = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""") @require_torch @require_tf @slow class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Any: _UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )] if identifier is not None: _UpperCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ): for n_ in n_identifier: _UpperCAmelCase = [file for file in files if n_ not in file] else: _UpperCAmelCase = [file for file in files if n_identifier not in file] _UpperCAmelCase = ignore_files or [] ignore_files.append('''__init__.py''' ) _UpperCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , __UpperCamelCase ) if only_modules: _UpperCAmelCase = file.split('''.''' )[0] try: _UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase ) _UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: _UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : Any )->Dict: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''modeling''' _UpperCAmelCase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''tokenization''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : Tuple )->Optional[Any]: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''configuration''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = Path('''docs/source''' ) _UpperCAmelCase = ['''favicon.ico'''] self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
260
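# A self-contained sketch of the doctest machinery the class above relies on,
# run against an inline string instead of transformers modules.
import doctest

TEXT = """
>>> 2 + 2
4
"""
test = doctest.DocTestParser().get_doctest(TEXT, {}, "demo", "demo.txt", 0)
result = doctest.DocTestRunner(verbose=False).run(test)
assert result.failed == 0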
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = 
scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for 
solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
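# A minimal sketch of the save/reload roundtrip the scheduler tests above
# exercise, assuming diffusers is installed.
import tempfile

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)

assert reloaded.config.solver_order == 2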
1
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default="What does Moses' rod turn into ?",
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
353
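# A quick check of the passage-splitting idea the script above applies with
# n=100 words; shown here with n=2 so the grouping is visible.
def split_text(text, n=100, character=" "):
    parts = text.split(character)
    return [character.join(parts[i : i + n]).strip() for i in range(0, len(parts), n)]


print(split_text("a b c d e", n=2))  # ['a b', 'c d', 'e']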
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """simple docstring"""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """simple docstring"""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """simple docstring"""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
267
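# The functions above combine into a diamond; trailing spaces omitted.
pretty_print(3)
# output:
#   *
#  * *
# * * *
# * * *
#  * *
#   *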
0
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
63
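# A minimal usage sketch for the fast tokenizer above; assumes the
# camembert-base files in the URL maps can be fetched from the Hub.
from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))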
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
178
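# Calling the converter above programmatically; every path here is a
# hypothetical placeholder.
convert_xlnet_checkpoint_to_pytorch(
    "/path/to/xlnet/model.ckpt",
    "/path/to/xlnet/config.json",
    "/path/to/pytorch_dump",
    finetuning_task="sst-2",
)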
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowerCamelCase_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class UpperCamelCase_ (unittest.TestCase ): __magic_name__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __magic_name__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __magic_name__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __magic_name__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = ZeroShotClassificationPipeline( model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ )]} ) # No kwarg UpperCAmelCase_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ )]} ) UpperCAmelCase_ : List[Any] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ )]} ) UpperCAmelCase_ : Optional[Any] = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) UpperCAmelCase_ : Any = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) UpperCAmelCase_ : List[Any] = classifier( "Who are you voting for in 2020?" 
, candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(lowerCAmelCase_ , {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase_ : List[str] = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase_ , [ {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )]} for i in range(1 ) ] , ) UpperCAmelCase_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase_ , [ {"sequence": ANY(lowerCAmelCase_ ), "labels": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )], "scores": [ANY(lowerCAmelCase_ ), ANY(lowerCAmelCase_ )]} for i in range(2 ) ] , ) with self.assertRaises(lowerCAmelCase_ ): classifier("" , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase_ ): classifier(lowerCAmelCase_ , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase_ ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(lowerCAmelCase_ ): classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(lowerCAmelCase_ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase_ , ) self.run_entailment_id(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Pipeline ) -> int: UpperCAmelCase_ : Union[str, Any] = zero_shot_classifier.model.config UpperCAmelCase_ : Dict = config.labelaid UpperCAmelCase_ : Union[str, Any] = zero_shot_classifier.entailment_id UpperCAmelCase_ : List[str] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase_ : Union[str, Any] = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase_ : Any = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase_ : Optional[int] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase_ : Tuple = original_labelaid self.assertEqual(lowerCAmelCase_ , zero_shot_classifier.entailment_id ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Tuple = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: UpperCAmelCase_ : Optional[int] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) UpperCAmelCase_ : Union[str, Any] = zero_shot_classifier( "Who are you voting for in 2020?" 
, candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : List[str] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) UpperCAmelCase_ : Optional[int] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: UpperCAmelCase_ : Dict = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) UpperCAmelCase_ : str = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase_ : str = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase_ , ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) UpperCAmelCase_ : List[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase_ : List[Any] = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase_ , ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
353
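# A minimal sketch of the pipeline under test; the same tiny checkpoint as
# above keeps it fast, but its scores are random, not meaningful.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
output = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "science"])
print(output["labels"], output["scores"])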
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCamelCase_ = logging.get_logger(__name__) class UpperCamelCase_ (__A ): __magic_name__ = ['''pixel_values'''] def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 255 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: super().__init__(**lowerCAmelCase_ ) UpperCAmelCase_ : Any = size if size is not None else {"shortest_edge": 256} UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) UpperCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" ) UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : int = size UpperCAmelCase_ : Optional[int] = resample UpperCAmelCase_ : Tuple = do_center_crop UpperCAmelCase_ : Any = crop_size UpperCAmelCase_ : List[str] = do_rescale UpperCAmelCase_ : Dict = rescale_factor UpperCAmelCase_ : str = do_normalize UpperCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Any , ) -> np.ndarray: UpperCAmelCase_ : Any = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) UpperCAmelCase_ : List[Any] = get_resize_output_image_size(lowerCAmelCase_ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray: UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] ) -> np.ndarray: return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> np.ndarray: return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Tuple , ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : List[str] = size if size is not None else self.size UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample UpperCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : List[Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" ) UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Optional[Any] = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Optional[Any] = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: UpperCAmelCase_ : Union[str, Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: UpperCAmelCase_ : Tuple = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: UpperCAmelCase_ : Any = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] UpperCAmelCase_ : Any = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] UpperCAmelCase_ : int = {"pixel_values": images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Tuple] = None ) -> Optional[int]: UpperCAmelCase_ : Dict = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = target_sizes.numpy() UpperCAmelCase_ : Dict = [] for idx in range(len(lowerCAmelCase_ ) ): UpperCAmelCase_ : List[str] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCAmelCase_ ) UpperCAmelCase_ : Any = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = logits.argmax(dim=1 ) UpperCAmelCase_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
253
0
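# A self-contained numpy sketch of the center-crop -> rescale -> normalize
# steps the processor above chains (resize is skipped so no interpolation
# library is needed; mean/std of 0.5 matches IMAGENET_STANDARD_MEAN/STD).
import numpy as np

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
top = (image.shape[0] - 224) // 2
left = (image.shape[1] - 224) // 2
crop = image[top : top + 224, left : left + 224]  # center crop
pixels = crop.astype(np.float32) * (1 / 255)  # rescale
pixels = (pixels - 0.5) / 0.5  # normalize
pixels = np.transpose(pixels, (2, 0, 1))[None]  # HWC -> NCHW
print(pixels.shape)  # (1, 3, 224, 224)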
"""simple docstring""" import operator def _snake_case ( lowerCamelCase__ : list , lowerCamelCase__ : bool = False , lowerCamelCase__ : list | None = None ) -> list: lowerCamelCase_ : Tuple =operator.lt if reverse else operator.gt lowerCamelCase_ : int =solution or [] if not arr: return solution lowerCamelCase_ : Any =[arr.pop(0 )] for i, item in enumerate(lowerCamelCase__ ): if _operator(lowerCamelCase__ , sublist[-1] ): sublist.append(lowerCamelCase__ ) arr.pop(lowerCamelCase__ ) # merging sublist into solution list if not solution: solution.extend(lowerCamelCase__ ) else: while sublist: lowerCamelCase_ : Dict =sublist.pop(0 ) for i, xx in enumerate(lowerCamelCase__ ): if not _operator(lowerCamelCase__ , lowerCamelCase__ ): solution.insert(lowerCamelCase__ , lowerCamelCase__ ) break else: solution.append(lowerCamelCase__ ) strand_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
144
"""simple docstring""" from sklearn.metrics import fa_score import datasets A__ : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' A__ : List[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' A__ : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCAmelCase__ ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def UpperCAmelCase__ ( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int=None , snake_case__ : Optional[int]=1 , snake_case__ : int="binary" , snake_case__ : List[str]=None ): lowerCamelCase_ : str =fa_score( snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ ) return {"f1": float(snake_case__ ) if score.size == 1 else score}
144
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : List[str]=False ) -> Any: '''simple docstring''' UpperCAmelCase_= OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("""head""" ): UpperCAmelCase_= """segformer.encoder.""" + key if key.startswith("""backbone""" ): UpperCAmelCase_= key.replace("""backbone""" ,"""segformer.encoder""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase_= key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCAmelCase_= key.replace(F"""patch_embed{idx}""" ,F"""patch_embeddings.{int(lowerCAmelCase_ )-1}""" ) if "norm" in key: UpperCAmelCase_= key.replace("""norm""" ,"""layer_norm""" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase_= key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )] UpperCAmelCase_= key.replace(F"""layer_norm{idx}""" ,F"""layer_norm.{int(lowerCAmelCase_ )-1}""" ) if "layer_norm1" in key: UpperCAmelCase_= key.replace("""layer_norm1""" ,"""layer_norm_1""" ) if "layer_norm2" in key: UpperCAmelCase_= key.replace("""layer_norm2""" ,"""layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase_= key[key.find("""block""" ) + len("""block""" )] UpperCAmelCase_= key.replace(F"""block{idx}""" ,F"""block.{int(lowerCAmelCase_ )-1}""" ) if "attn.q" in key: UpperCAmelCase_= key.replace("""attn.q""" ,"""attention.self.query""" ) if "attn.proj" in key: UpperCAmelCase_= key.replace("""attn.proj""" ,"""attention.output.dense""" ) if "attn" in key: UpperCAmelCase_= key.replace("""attn""" ,"""attention.self""" ) if "fc1" in key: UpperCAmelCase_= key.replace("""fc1""" ,"""dense1""" ) if "fc2" in key: UpperCAmelCase_= key.replace("""fc2""" ,"""dense2""" ) if "linear_pred" in key: UpperCAmelCase_= key.replace("""linear_pred""" ,"""classifier""" ) if "linear_fuse" in key: UpperCAmelCase_= key.replace("""linear_fuse.conv""" ,"""linear_fuse""" ) UpperCAmelCase_= key.replace("""linear_fuse.bn""" ,"""batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase_= key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCAmelCase_= key.replace(F"""linear_c{idx}""" ,F"""linear_c.{int(lowerCAmelCase_ )-1}""" ) if key.startswith("""head""" ): UpperCAmelCase_= key.replace("""head""" ,"""classifier""" ) UpperCAmelCase_= value return new_state_dict def __a ( lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase_= state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) UpperCAmelCase_= state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict UpperCAmelCase_= kv_weight[ : config.hidden_sizes[i], : ] UpperCAmelCase_= kv_bias[: 
config.hidden_sizes[i]] UpperCAmelCase_= kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase_= kv_bias[ config.hidden_sizes[i] : ] def __a ( ) -> int: '''simple docstring''' UpperCAmelCase_= """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase_= Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ) return image @torch.no_grad() def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : Dict ,lowerCAmelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_= SegformerConfig() UpperCAmelCase_= False # set attributes based on model_name UpperCAmelCase_= """huggingface/label-files""" if "segformer" in model_name: UpperCAmelCase_= model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2] if "ade" in model_name: UpperCAmelCase_= 1_50 UpperCAmelCase_= """ade20k-id2label.json""" UpperCAmelCase_= (1, 1_50, 1_28, 1_28) elif "city" in model_name: UpperCAmelCase_= 19 UpperCAmelCase_= """cityscapes-id2label.json""" UpperCAmelCase_= (1, 19, 1_28, 1_28) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: UpperCAmelCase_= True UpperCAmelCase_= model_name[4:6] UpperCAmelCase_= 10_00 UpperCAmelCase_= """imagenet-1k-id2label.json""" UpperCAmelCase_= (1, 10_00) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes UpperCAmelCase_= json.load(open(hf_hub_download(lowerCAmelCase_ ,lowerCAmelCase_ ,repo_type="""dataset""" ) ,"""r""" ) ) UpperCAmelCase_= {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} UpperCAmelCase_= idalabel UpperCAmelCase_= {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": UpperCAmelCase_= [64, 1_28, 3_20, 5_12] UpperCAmelCase_= 2_56 elif size == "b2": UpperCAmelCase_= [64, 1_28, 3_20, 5_12] UpperCAmelCase_= 7_68 UpperCAmelCase_= [3, 4, 6, 3] elif size == "b3": UpperCAmelCase_= [64, 1_28, 3_20, 5_12] UpperCAmelCase_= 7_68 UpperCAmelCase_= [3, 4, 18, 3] elif size == "b4": UpperCAmelCase_= [64, 1_28, 3_20, 5_12] UpperCAmelCase_= 7_68 UpperCAmelCase_= [3, 8, 27, 3] elif size == "b5": UpperCAmelCase_= [64, 1_28, 3_20, 5_12] UpperCAmelCase_= 7_68 UpperCAmelCase_= [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) UpperCAmelCase_= SegformerImageProcessor( image_scale=(5_12, 5_12) ,keep_ratio=lowerCAmelCase_ ,align=lowerCAmelCase_ ,do_random_crop=lowerCAmelCase_ ) # prepare image UpperCAmelCase_= prepare_img() UpperCAmelCase_= image_processor(images=lowerCAmelCase_ ,return_tensors="""pt""" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location=torch.device("""cpu""" ) ) else: UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location=torch.device("""cpu""" ) )["""state_dict"""] # rename keys UpperCAmelCase_= rename_keys(lowerCAmelCase_ ,encoder_only=lowerCAmelCase_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(lowerCAmelCase_ ,lowerCAmelCase_ ) # create HuggingFace model and load state dict if encoder_only: UpperCAmelCase_= False UpperCAmelCase_= SegformerForImageClassification(lowerCAmelCase_ ) else: UpperCAmelCase_= SegformerForSemanticSegmentation(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) model.eval() # forward pass UpperCAmelCase_= model(lowerCAmelCase_ ) UpperCAmelCase_= outputs.logits # set 
expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": UpperCAmelCase_= torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": UpperCAmelCase_= torch.tensor( [ [ [-1.1372E01, -1.2787E01, -1.3477E01], [-1.2536E01, -1.4194E01, -1.4409E01], [-1.3217E01, -1.4888E01, -1.5327E01], ], [ [-1.4791E01, -1.7122E01, 
-1.8277E01], [-1.7163E01, -1.9192E01, -1.9533E01], [-1.7897E01, -1.9991E01, -2.0315E01], ], [ [7.6723E-01, 4.1921E-01, -7.7878E-02], [4.7772E-01, 9.5557E-03, -2.8082E-01], [3.6032E-01, -2.4826E-01, -5.1168E-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": UpperCAmelCase_= torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": UpperCAmelCase_= torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: UpperCAmelCase_= logits.argmax(-1 ).item() print("""Predicted class:""" ,model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] ,lowerCAmelCase_ ,atol=1E-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) image_processor.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( 
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) __A = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
277
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
277
1
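# A minimal sketch of the 1-indexed -> 0-indexed renaming pattern used by
# rename_keys in the SegFormer conversion script above; `key` here is a
# made-up example key, not one taken from a real checkpoint.
key = "patch_embed3.proj.weight"
idx = key[key.find("patch_embed") + len("patch_embed")]
renamed = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
assert renamed == "patch_embeddings.2.proj.weight"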
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
192
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[int] ) -> List[str]: A__ : Tuple = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) A__ : List[Any] = DatasetInfosDict.from_directory(lowercase_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def UpperCamelCase (lowercase_: str , lowercase_: DatasetInfo ) -> List[Any]: A__ : Union[str, Any] = str(lowercase_ ) dataset_info.write_to_directory(lowercase_ ) A__ : List[Any] = DatasetInfo.from_directory(lowercase_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowercase_ , """dataset_info.json""" ) ) def UpperCamelCase () -> List[Any]: A__ : Union[str, Any] = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) A__ : Dict = dataset_info._to_yaml_dict() assert sorted(lowercase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) A__ : Union[str, Any] = yaml.safe_dump(lowercase_ ) A__ : List[Any] = yaml.safe_load(lowercase_ ) assert dataset_info_yaml_dict == reloaded def UpperCamelCase () -> List[str]: A__ : Optional[int] = DatasetInfo() A__ : List[Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCamelCase (lowercase_: Tuple , lowercase_: DatasetInfosDict ) -> Optional[Any]: A__ : List[Any] = 
str(lowercase_ ) dataset_infos_dict.write_to_directory(lowercase_ ) A__ : Dict = DatasetInfosDict.from_directory(lowercase_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): A__ : Optional[int] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml A__ : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowercase_ , """README.md""" ) )
192
1
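# A quick cross-check of the expected ids in test_full_tokenizer above: with
# the toy vocabulary written in setUp, convert_tokens_to_ids reduces to
# list-index lookup (pure-Python sketch, no tokenizer required).
vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
tokens = ["adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt", "<unk>"]
assert [vocab.index(t) for t in tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]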
import math

import qiskit


def quantum_full_adder(
    input_a: int = 1, input_b: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    # The obfuscated source reused one name for all three parameters and for
    # both isinstance() arguments; distinct names are restored here so the
    # function is runnable.
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    quantum_register = qiskit.QuantumRegister(4, "qr")
    classical_register = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_a, input_b, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
356
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    # The obfuscated source bound p, q, and n to the same name; distinct names
    # are restored here so the later references resolve.
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
117
0
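# A classical reference for the quantum full adder above: the measured sum and
# carry bits should follow the usual full-adder identities. This is only a
# truth-table sketch; it does not touch qiskit.
for a in (0, 1):
    for b in (0, 1):
        for c_in in (0, 1):
            s = a ^ b ^ c_in                    # sum bit
            c_out = (a & b) | (c_in & (a ^ b))  # carry-out bit
            assert 2 * c_out + s == a + b + c_in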
'''simple docstring''' # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Any: UpperCAmelCase : Dict = multiprocessing.Manager() UpperCAmelCase : Dict = manager.list() UpperCAmelCase : int = multiprocessing.Process(target=_lowerCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('''timed out''' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ) -> Optional[int]: with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil UpperCAmelCase : List[str] = shutil.rmtree UpperCAmelCase : Optional[Any] = os.rmdir UpperCAmelCase : List[str] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: UpperCAmelCase : List[str] = {} with swallow_io(): with time_limit(_lowerCAmelCase ): exec(_lowerCAmelCase , _lowerCAmelCase ) result.append('''passed''' ) except TimeoutException: result.append('''timed out''' ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. UpperCAmelCase : Union[str, Any] = rmtree UpperCAmelCase : Optional[Any] = rmdir UpperCAmelCase : List[Any] = chdir @contextlib.contextmanager def snake_case_ ( _lowerCAmelCase : Any ) -> str: def signal_handler(_lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ): raise TimeoutException('''Timed out!''' ) signal.setitimer(signal.ITIMER_REAL , _lowerCAmelCase ) signal.signal(signal.SIGALRM , _lowerCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def snake_case_ ( ) -> str: UpperCAmelCase : Dict = WriteOnlyStringIO() with contextlib.redirect_stdout(_lowerCAmelCase ): with contextlib.redirect_stderr(_lowerCAmelCase ): with redirect_stdin(_lowerCAmelCase ): yield @contextlib.contextmanager def snake_case_ ( ) -> List[Any]: with tempfile.TemporaryDirectory() as dirname: with chdir(_lowerCAmelCase ): yield dirname class SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ): """simple docstring""" pass class SCREAMING_SNAKE_CASE( io.StringIO ): """simple docstring""" def A ( self : Any , *__snake_case : Optional[int] , **__snake_case : Dict ) -> Union[str, Any]: raise OSError def A ( self : Optional[Any] , *__snake_case : List[Any] , **__snake_case : int ) -> Tuple: raise OSError def A ( self : Dict , *__snake_case : Tuple , **__snake_case : Optional[int] ) -> int: raise OSError def A ( self : List[str] , *__snake_case : str , **__snake_case : List[str] ) -> List[Any]: return False class SCREAMING_SNAKE_CASE( contextlib._RedirectStream ): # type: ignore """simple docstring""" lowerCamelCase__ = "stdin" @contextlib.contextmanager def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Optional[int]: if root == ".": yield return UpperCAmelCase : Union[str, Any] = os.getcwd() os.chdir(_lowerCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Union[str, Any]=None ) -> Union[str, Any]: if 
maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins UpperCAmelCase : Optional[int] = None UpperCAmelCase : Union[str, Any] = None import os UpperCAmelCase : Union[str, Any] = '''1''' UpperCAmelCase : Optional[int] = None UpperCAmelCase : int = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Any = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : List[Any] = None UpperCAmelCase : Dict = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : str = None UpperCAmelCase : List[Any] = None UpperCAmelCase : Any = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : str = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Dict = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Tuple = None UpperCAmelCase : Any = None UpperCAmelCase : List[str] = None UpperCAmelCase : Any = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Tuple = None UpperCAmelCase : List[str] = None UpperCAmelCase : Tuple = None UpperCAmelCase : Union[str, Any] = None import shutil UpperCAmelCase : int = None UpperCAmelCase : List[str] = None UpperCAmelCase : Dict = None import subprocess UpperCAmelCase : str = None # type: ignore UpperCAmelCase : Union[str, Any] = None import sys UpperCAmelCase : Tuple = None UpperCAmelCase : Any = None UpperCAmelCase : str = None UpperCAmelCase : Tuple = None UpperCAmelCase : int = None
23
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
222
0
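# A minimal sketch of the rope_scaling dict that _rope_scaling_validation
# above accepts: exactly two fields, a "type" in {"linear", "dynamic"} and a
# float "factor" > 1.0. The concrete values here are illustrative only.
rope_scaling = {"type": "linear", "factor": 2.0}
assert len(rope_scaling) == 2
assert rope_scaling["type"] in ("linear", "dynamic")
assert isinstance(rope_scaling["factor"], float) and rope_scaling["factor"] > 1.0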
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if not head: return True # split the list to two parts _UpperCAmelCase = head.next, head while fast and fast.next: _UpperCAmelCase = fast.next.next _UpperCAmelCase = slow.next _UpperCAmelCase = slow.next _UpperCAmelCase = None # Don't forget here! But forget still works! # reverse the second part _UpperCAmelCase = None while second: _UpperCAmelCase = second.next _UpperCAmelCase = node _UpperCAmelCase = second _UpperCAmelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False _UpperCAmelCase = node.next _UpperCAmelCase = head.next return True def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) _UpperCAmelCase = head while fast and fast.next: _UpperCAmelCase = fast.next.next, slow.next # 2. Push the second half into the stack _UpperCAmelCase = [slow.val] while slow.next: _UpperCAmelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False _UpperCAmelCase = cur.next return True def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' if not head or not head.next: return True _UpperCAmelCase = {} _UpperCAmelCase = 0 while head: if head.val in d: d[head.val].append(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = [pos] _UpperCAmelCase = head.next pos += 1 _UpperCAmelCase = pos - 1 _UpperCAmelCase = 0 for v in d.values(): if len(_SCREAMING_SNAKE_CASE ) % 2 != 0: middle += 1 else: _UpperCAmelCase = 0 for i in range(0 , len(_SCREAMING_SNAKE_CASE ) ): if v[i] + v[len(_SCREAMING_SNAKE_CASE ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
351
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = 
type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : 
Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( 
self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
326
0
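# Hypothetical usage of the palindrome checkers defined above, assuming a
# minimal singly linked node type with `val` and `next` (no node class is
# defined in that snippet). Note that is_palindrome rewires the list it is
# given, so each call below gets a fresh list.
class Node:
    def __init__(self, val, next_node=None):
        self.val = val
        self.next = next_node


def build(vals):
    head = None
    for v in reversed(vals):
        head = Node(v, head)
    return head


assert is_palindrome_stack(build([1, 2, 2, 1]))
assert is_palindrome_dict(build([1, 2, 2, 1]))
assert is_palindrome(build([1, 2, 2, 1]))
assert not is_palindrome_stack(build([1, 2]))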
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
30
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : str = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class lowercase ( __UpperCAmelCase , __UpperCAmelCase): __lowerCAmelCase : List[Any] = """convnextv2""" def __init__( self : int , _lowerCamelCase : str=3 , _lowerCamelCase : str=4 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : List[str]=1E-12 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=2_24 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Optional[Any] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : str = num_channels A_ : int = patch_size A_ : Union[str, Any] = num_stages A_ : Any = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes A_ : Any = [3, 3, 9, 3] if depths is None else depths A_ : Optional[int] = hidden_act A_ : Tuple = initializer_range A_ : int = layer_norm_eps A_ : List[Any] = drop_path_rate A_ : Union[str, Any] = image_size A_ : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] A_ , A_ : Tuple = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
167
0
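# A tiny spot check for solution() in the prime-summation snippet above: the
# primes below 10 are 2, 3, 5, and 7, which sum to 17.
assert solution(10) == 2 + 3 + 5 + 7 == 17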
def solution() -> str:
    # Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000.
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
355
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
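The integration test above ends with a greedy `generate` call. As a reference, this is the same call pattern in isolation; the checkpoint id and token ids are the ones the test itself uses, and the repetitive output it asserts is expected per the test's own TODO note about auto-regressive inference:

```python
import torch
from transformers import XLMWithLMHeadModel

# Greedy decoding with the checkpoint named in the test above.
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)   # do_sample=False => greedy
print(output_ids[0].tolist())
```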
304
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
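The snippet above is the standard lazy-module boilerplate: the import-structure dict maps submodules to exported names, and `_LazyModule` defers the heavy imports until a symbol is actually touched, while the `TYPE_CHECKING` branch keeps static analyzers happy. A consumer-side sketch, assuming a transformers install with sentencepiece; the checkpoint name `microsoft/layoutxlm-base` is an assumption for illustration, not taken from this row:

```python
# Importing the symbol is what triggers the lazy submodule load.
from transformers import LayoutXLMProcessor

# Hypothetical checkpoint name, used only to illustrate the call pattern.
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
```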
110
class _a : def __init__( self: Any ) -> Tuple: """simple docstring""" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = [] def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int ) -> int: """simple docstring""" if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: lowercase__ = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: lowercase__ = self.__min_dist_top_down_dp(UpperCamelCase_ , n - 1 ) lowercase__ = self.__min_dist_top_down_dp(m - 1 , UpperCamelCase_ ) lowercase__ = self.__min_dist_top_down_dp(m - 1 , n - 1 ) lowercase__ = 1 + min(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return self.dp[m][n] def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: str ) -> int: """simple docstring""" lowercase__ = worda lowercase__ = worda lowercase__ = [[-1 for _ in range(len(UpperCamelCase_ ) )] for _ in range(len(UpperCamelCase_ ) )] return self.__min_dist_top_down_dp(len(UpperCamelCase_ ) - 1 , len(UpperCamelCase_ ) - 1 ) def lowerCamelCase_ ( self: int , UpperCamelCase_: str , UpperCamelCase_: str ) -> int: """simple docstring""" lowercase__ = worda lowercase__ = worda lowercase__ = len(UpperCamelCase_ ) lowercase__ = len(UpperCamelCase_ ) lowercase__ = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty lowercase__ = j elif j == 0: # second string is empty lowercase__ = i elif worda[i - 1] == worda[j - 1]: # last characters are equal lowercase__ = self.dp[i - 1][j - 1] else: lowercase__ = self.dp[i][j - 1] lowercase__ = self.dp[i - 1][j] lowercase__ = self.dp[i - 1][j - 1] lowercase__ = 1 + min(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return self.dp[m][n] if __name__ == "__main__": lowerCAmelCase = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() lowerCAmelCase = input('Enter the first string: ').strip() lowerCAmelCase = input('Enter the second string: ').strip() print() print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""") print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""") print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
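With the scrambled identifiers stripped away, the class above is the textbook edit-distance DP, implemented both top-down with memoization and bottom-up. A deobfuscated sketch of the bottom-up variant, with the standard base cases and three-way minimum:

```python
# A deobfuscated sketch of what the snippet above computes: the Levenshtein
# (edit) distance via bottom-up dynamic programming.
def edit_distance(a: str, b: str) -> int:
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:                   # first string empty: insert all of b
                dp[i][j] = j
            elif j == 0:                 # second string empty: delete all of a
                dp[i][j] = i
            elif a[i - 1] == b[j - 1]:   # last characters match: no edit needed
                dp[i][j] = dp[i - 1][j - 1]
            else:                        # insert, delete, or replace
                dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])
    return dp[m][n]

assert edit_distance("intention", "execution") == 5
```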
110
1
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase__ ( snake_case__ ): _UpperCAmelCase :Tuple = "char" _UpperCAmelCase :Optional[Any] = "bpe" _UpperCAmelCase :Optional[int] = "wp" A__ : Any = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase__ ( snake_case__ ): _UpperCAmelCase :Union[str, Any] = ["image_processor", "char_tokenizer"] _UpperCAmelCase :int = "ViTImageProcessor" _UpperCAmelCase :List[str] = "MgpstrTokenizer" def __init__( self : Optional[int] , snake_case__ : Optional[int]=None , snake_case__ : List[Any]=None , **snake_case__ : int ): lowerCamelCase_ : Optional[int] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , snake_case__ , ) lowerCamelCase_ : Dict =kwargs.pop("feature_extractor" ) lowerCamelCase_ : Dict =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) lowerCamelCase_ : List[str] =tokenizer lowerCamelCase_ : str =AutoTokenizer.from_pretrained("gpt2" ) lowerCamelCase_ : Tuple =AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(snake_case__ , snake_case__ ) def __call__( self : Tuple , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , **snake_case__ : List[Any] ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: lowerCamelCase_ : Any =self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None: lowerCamelCase_ : Dict =self.char_tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase_ : List[Any] =encodings["input_ids"] return inputs def UpperCAmelCase__ ( self : int , snake_case__ : Any ): lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : int =sequences lowerCamelCase_ : Optional[int] =char_preds.size(0 ) lowerCamelCase_ , lowerCamelCase_ : List[Any] =self._decode_helper(snake_case__ , "char" ) lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self._decode_helper(snake_case__ , "bpe" ) lowerCamelCase_ , lowerCamelCase_ : Tuple =self._decode_helper(snake_case__ , "wp" ) lowerCamelCase_ : List[Any] =[] lowerCamelCase_ : List[str] =[] for i in range(snake_case__ ): lowerCamelCase_ : Optional[int] =[char_scores[i], bpe_scores[i], wp_scores[i]] lowerCamelCase_ : Dict =[char_strs[i], bpe_strs[i], wp_strs[i]] lowerCamelCase_ : List[Any] =scores.index(max(snake_case__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowerCamelCase_ : Dict ={} lowerCamelCase_ : List[str] =final_strs lowerCamelCase_ : Optional[Any] =final_scores lowerCamelCase_ : List[str] =char_strs lowerCamelCase_ : Optional[Any] =bpe_strs lowerCamelCase_ : Tuple =wp_strs return out def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[str] ): if format == DecodeType.CHARACTER: lowerCamelCase_ : List[str] =self.char_decode lowerCamelCase_ : Union[str, Any] =1 lowerCamelCase_ : Any ="[s]" elif format == DecodeType.BPE: lowerCamelCase_ : List[Any] =self.bpe_decode lowerCamelCase_ : str =2 lowerCamelCase_ : Any ="#" elif format == DecodeType.WORDPIECE: lowerCamelCase_ : List[str] =self.wp_decode lowerCamelCase_ : Optional[Any] =102 lowerCamelCase_ : Optional[Any] ="[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) lowerCamelCase_ , lowerCamelCase_ : Any =[], [] lowerCamelCase_ : str =pred_logits.size(0 ) lowerCamelCase_ : List[Any] =pred_logits.size(1 ) lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =pred_logits.topk(1 , dim=-1 , largest=snake_case__ , sorted=snake_case__ ) lowerCamelCase_ : List[str] =preds_index.view(-1 , snake_case__ )[:, 1:] lowerCamelCase_ : Optional[int] =decoder(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =torch.nn.functional.softmax(snake_case__ , dim=2 ).max(dim=2 ) lowerCamelCase_ : List[str] =preds_max_prob[:, 1:] for index in range(snake_case__ ): lowerCamelCase_ : Dict =preds_str[index].find(snake_case__ ) lowerCamelCase_ : Optional[int] =preds_str[index][:pred_eos] lowerCamelCase_ : Tuple =preds_index[index].cpu().tolist() lowerCamelCase_ : Any =pred_index.index(snake_case__ ) if eos_token in pred_index else -1 lowerCamelCase_ : List[Any] =preds_max_prob[index][: pred_eos_index + 1] lowerCamelCase_ : Any =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(snake_case__ ) conf_scores.append(snake_case__ ) return dec_strs, conf_scores def UpperCAmelCase__ ( self : int , snake_case__ : Union[str, Any] ): lowerCamelCase_ : int =[seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(snake_case__ )] return decode_strs def UpperCAmelCase__ ( self : str , snake_case__ : List[Any] ): return self.bpe_tokenizer.batch_decode(snake_case__ ) def UpperCAmelCase__ 
( self : List[str] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : List[str] =[seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(snake_case__ )] return decode_strs
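The `batch_decode` path in the processor above runs three decoding heads (character, BPE, wordpiece) and keeps, per sample, the string whose head reported the highest cumulative-probability score. A toy illustration of that selection step, with invented strings and scores:

```python
# (text, confidence) per decoding head; the values are made up for illustration.
candidates = [("hello", 0.91), ("he1lo", 0.42), ("hell0", 0.77)]
best_text, best_score = max(candidates, key=lambda pair: pair[1])
assert best_text == "hello"
```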
209
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) A__ : Optional[int] = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[Any] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
209
1
'''simple docstring''' from __future__ import annotations from random import random class _a : def __init__( self : List[Any] , lowercase : int | None = None ): '''simple docstring''' UpperCAmelCase = value UpperCAmelCase = random() UpperCAmelCase = None UpperCAmelCase = None def __repr__( self : List[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return f"'{self.value}: {self.prior:.5}'" else: return pformat( {f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 ) def __str__( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = str(self.value ) + ''' ''' UpperCAmelCase = str(self.left or '''''' ) UpperCAmelCase = str(self.right or '''''' ) return value + left + right def snake_case_ (_a : Node | None , _a : int ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: UpperCAmelCase , UpperCAmelCase = split(root.left , _lowerCamelCase ) return left, root else: UpperCAmelCase , UpperCAmelCase = split(root.right , _lowerCamelCase ) return root, right def snake_case_ (_a : Node | None , _a : Node | None ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: UpperCAmelCase = merge(left.right , _lowerCamelCase ) return left else: UpperCAmelCase = merge(_lowerCamelCase , right.left ) return right def snake_case_ (_a : Node | None , _a : int ): UpperCAmelCase = Node(_lowerCamelCase ) UpperCAmelCase , UpperCAmelCase = split(_lowerCamelCase , _lowerCamelCase ) return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def snake_case_ (_a : Node | None , _a : int ): UpperCAmelCase , UpperCAmelCase = split(_lowerCamelCase , value - 1 ) UpperCAmelCase , UpperCAmelCase = split(_lowerCamelCase , _lowerCamelCase ) return merge(_lowerCamelCase , _lowerCamelCase ) def snake_case_ (_a : Node | None ): if not root: # None return else: inorder(root.left ) print(root.value , end=''',''' ) inorder(root.right ) def snake_case_ (_a : Node | None , _a : str ): for arg in args.split(): if arg[0] == "+": UpperCAmelCase = insert(_lowerCamelCase , int(arg[1:] ) ) elif arg[0] == "-": UpperCAmelCase = erase(_lowerCamelCase , int(arg[1:] ) ) else: print('''Unknown command''' ) return root def snake_case_ (): UpperCAmelCase = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) UpperCAmelCase = input() while args != "q": UpperCAmelCase = interact_treap(_lowerCamelCase , _lowerCamelCase ) print(_lowerCamelCase ) UpperCAmelCase = input() print('''good by!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
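Under the obfuscation, the snippet above is a randomized treap: `split` partitions by key, `merge` stitches two treaps by heap priority, and insertion is a split followed by two merges. A deobfuscated sketch of the core operations, mirroring the snippet's logic:

```python
from __future__ import annotations
from random import random

class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.prior = random()            # random heap priority
        self.left: Node | None = None
        self.right: Node | None = None

def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split into (keys <= value, keys > value)."""
    if root is None:
        return None, None
    if value < root.value:
        left, root.left = split(root.left, value)
        return left, root
    root.right, right = split(root.right, value)
    return root, right

def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, keeping the min-priority node on top."""
    if not left or not right:
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right

def insert(root: Node | None, value: int) -> Node:
    left, right = split(root, value)
    return merge(merge(left, Node(value)), right)

root = None
for v in (5, 3, 8, 1):
    root = insert(root, v)
```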
34
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCAmelCase : Optional[int] =16 __lowerCAmelCase : Tuple =32 def UpperCamelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : DatasetDict , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , _lowerCamelCase : int = 16 ): A__ = AutoTokenizer.from_pretrained("bert-base-cased" ) A__ = DatasetDict( { "train": dataset["train"].select(_lowerCamelCase ), "validation": dataset["train"].select(_lowerCamelCase ), "test": dataset["validation"], } ) def tokenize_function(_lowerCamelCase : Dict ): # max_length=None => use the model max length (it's actually the default) A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A__ = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. A__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A__ = 16 elif accelerator.mixed_precision != "no": A__ = 8 else: A__ = None return tokenizer.pad( _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
A__ = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) A__ = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) A__ = DataLoader( tokenized_datasets["test"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader, test_dataloader def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str ): # New Code # A__ = [] # Download the dataset A__ = load_dataset("glue" , "mrpc" ) # Create our splits A__ = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A__ = config["lr"] A__ = int(config["num_epochs"] ) A__ = int(config["seed"] ) A__ = int(config["batch_size"] ) A__ = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation A__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A__ = batch_size // MAX_GPU_BATCH_SIZE A__ = MAX_GPU_BATCH_SIZE set_seed(_lowerCamelCase ) # New Code # # Create our folds: A__ = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] ) A__ = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(_lowerCamelCase ): A__, A__, A__ = get_fold_dataloaders( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A__ = model.to(accelerator.device ) # Instantiate optimizer A__ = AdamW(params=model.parameters() , lr=_lowerCamelCase ) # Instantiate scheduler A__ = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A__, A__, A__, A__, A__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Now we train the model for epoch in range(_lowerCamelCase ): model.train() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A__ = model(**_lowerCamelCase ) A__ = outputs.loss A__ = loss / gradient_accumulation_steps accelerator.backward(_lowerCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A__ = model(**_lowerCamelCase ) A__ = outputs.logits.argmax(dim=-1 ) A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) A__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , _lowerCamelCase ) # New Code # # We also run predictions on the test set at the very end A__ = [] for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A__ = model(**_lowerCamelCase ) A__ = outputs.logits A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(_lowerCamelCase , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: A__ = torch.cat(_lowerCamelCase , dim=0 ) A__ = torch.stack(_lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) A__ = metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase ) accelerator.print("Average test metrics from all folds:" , _lowerCamelCase ) def UpperCamelCase ( ): A__ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) # New Code # parser.add_argument("--num_folds" , type=_lowerCamelCase , default=3 , help="The number of splits to perform across the dataset" ) A__ = parser.parse_args() A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
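A minimal, framework-free illustration of the `StratifiedKFold` splitting the script above relies on: each fold preserves the class balance of the labels, and (as in the script) the features passed to `split` can be a dummy array since only the labels drive the stratification:

```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=2, shuffle=True, random_state=0)
for train_idx, valid_idx in kfold.split(np.zeros(len(labels)), labels):
    print("train:", train_idx, "valid:", valid_idx)
```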
237
0
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar _A = TypeVar("""T""") class _lowerCamelCase ( Generic[T] ): def __init__( self : List[str] , UpperCamelCase : bool = True ) -> None: """simple docstring""" lowerCAmelCase__ : dict[T, list[T]] = {} # dictionary of lists lowerCAmelCase__ : Any = directed def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : T , UpperCamelCase : T ) -> GraphAdjacencyList[T]: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCamelCase ) self.adj_list[destination_vertex].append(UpperCamelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCamelCase ) lowerCAmelCase__ : Tuple = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCamelCase ) lowerCAmelCase__ : str = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: lowerCAmelCase__ : Any = [destination_vertex] lowerCAmelCase__ : Tuple = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCamelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: lowerCAmelCase__ : Any = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
# Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: lowerCAmelCase__ : Any = [destination_vertex] lowerCAmelCase__ : Optional[Any] = [] return self def __repr__( self : List[Any] ) -> str: """simple docstring""" return pformat(self.adj_list )
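A quick usage sketch of the adjacency-list graph above. The class name matches the `GraphAdjacencyList[T]` return annotation visible in the snippet; the method name `add_edge` is an assumption recovered from the comments, since the original identifier is scrambled:

```python
# Undirected construction; the snippet's __init__ defaults to directed=True.
g = GraphAdjacencyList[int](directed=False)
g.add_edge(1, 2)   # add_edge is the assumed un-obfuscated method name
g.add_edge(2, 3)
g.add_edge(1, 3)
print(g)           # {1: [2, 3], 2: [1, 3], 3: [2, 1]}
```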
212
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
212
1
'''simple docstring''' from functools import reduce __a = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def __snake_case( _lowerCAmelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda _lowerCAmelCase , _lowerCAmelCase : str(int(_lowerCAmelCase ) * int(_lowerCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(_lowerCAmelCase ) - 12 ) ) if __name__ == "__main__": print(F"{solution() = }")
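The solution above scans every 13-digit window of the 1000-digit constant and keeps the maximum product. A worked micro-example of the same idea on a short digit string, where the largest product of 3 adjacent digits in "123454321" is 4*5*4 = 80:

```python
digits = "123454321"
best = max(
    int(digits[i]) * int(digits[i + 1]) * int(digits[i + 2])
    for i in range(len(digits) - 2)
)
assert best == 80
```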
35
import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin lowerCAmelCase__ : Dict = logging.get_logger(__name__) enable_full_determinism() class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = UNetaDModel __lowerCamelCase = """sample""" @property def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[Any] = 4 snake_case__ : List[Any] = 3 snake_case__ : int = (32, 32) snake_case__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) snake_case__ : str = torch.tensor([10] ).to(__UpperCamelCase ) return {"sample": noise, "timestep": time_step} @property def __a ( self ) -> Optional[int]: '''simple docstring''' return (3, 32, 32) @property def __a ( self ) -> Optional[int]: '''simple docstring''' return (3, 32, 32) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Union[str, Any] = { 'block_out_channels': (32, 64), 'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'), 'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'), 'attention_head_dim': 3, 'out_channels': 3, 'in_channels': 3, 'layers_per_block': 2, 'sample_size': 32, } snake_case__ : List[Any] = self.dummy_input return init_dict, inputs_dict class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = UNetaDModel __lowerCamelCase = """sample""" @property def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : List[Any] = 4 snake_case__ : List[Any] = 4 snake_case__ : List[str] = (32, 32) snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) snake_case__ : int = torch.tensor([10] ).to(__UpperCamelCase ) return {"sample": noise, "timestep": time_step} @property def __a ( self ) -> int: '''simple docstring''' return (4, 32, 32) @property def __a ( self ) -> str: '''simple docstring''' return (4, 32, 32) def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Union[str, Any] = { 'sample_size': 32, 'in_channels': 4, 'out_channels': 4, 'layers_per_block': 2, 'block_out_channels': (32, 64), 'attention_head_dim': 32, 'down_block_types': ('DownBlock2D', 'DownBlock2D'), 'up_block_types': ('UpBlock2D', 'UpBlock2D'), } snake_case__ : List[Any] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> str: '''simple docstring''' snake_case__ , snake_case__ : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(__UpperCamelCase ) snake_case__ : List[Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase ) model.to(__UpperCamelCase ) snake_case__ : Union[str, Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def __a ( self ) -> str: '''simple docstring''' snake_case__ , snake_case__ : List[str] = 
UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase ) model_accelerate.to(__UpperCamelCase ) model_accelerate.eval() snake_case__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case__ : Union[str, Any] = noise.to(__UpperCamelCase ) snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase ) snake_case__ : str = model_accelerate(__UpperCamelCase , __UpperCamelCase )['sample'] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() snake_case__ , snake_case__ : Union[str, Any] = UNetaDModel.from_pretrained( 'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase ) model_normal_load.to(__UpperCamelCase ) model_normal_load.eval() snake_case__ : List[str] = model_normal_load(__UpperCamelCase , __UpperCamelCase )['sample'] assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ) model.eval() model.to(__UpperCamelCase ) snake_case__ : Any = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case__ : List[Any] = noise.to(__UpperCamelCase ) snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase ) with torch.no_grad(): snake_case__ : List[str] = model(__UpperCamelCase , __UpperCamelCase ).sample snake_case__ : Tuple = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case__ : int = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) ) class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = UNetaDModel __lowerCamelCase = """sample""" @property def __a ( self , __UpperCamelCase=(32, 32) ) -> Optional[Any]: '''simple docstring''' snake_case__ : Dict = 4 snake_case__ : Dict = 3 snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) snake_case__ : List[str] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase ) return {"sample": noise, "timestep": time_step} @property def __a ( self ) -> Optional[int]: '''simple docstring''' return (3, 32, 32) @property def __a ( self ) -> int: '''simple docstring''' return (3, 32, 32) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Optional[Any] = { 'block_out_channels': [32, 64, 64, 64], 'in_channels': 3, 'layers_per_block': 1, 'out_channels': 3, 'time_embedding_type': 'fourier', 'norm_eps': 1E-6, 'mid_block_scale_factor': math.sqrt(2.0 ), 'norm_num_groups': None, 'down_block_types': [ 'SkipDownBlock2D', 'AttnSkipDownBlock2D', 'SkipDownBlock2D', 'SkipDownBlock2D', ], 'up_block_types': [ 'SkipUpBlock2D', 'SkipUpBlock2D', 'AttnSkipUpBlock2D', 'SkipUpBlock2D', ], } snake_case__ : str = self.dummy_input return init_dict, inputs_dict @slow def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ , snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) 
self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(__UpperCamelCase ) snake_case__ : Dict = self.dummy_input snake_case__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase ) snake_case__ : List[Any] = noise snake_case__ : Any = model(**__UpperCamelCase ) assert image is not None, "Make sure output is not None" @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ) model.to(__UpperCamelCase ) snake_case__ : Optional[Any] = 4 snake_case__ : str = 3 snake_case__ : List[Any] = (256, 256) snake_case__ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase ) with torch.no_grad(): snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase ).sample snake_case__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case__ : Optional[int] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Dict = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' ) model.to(__UpperCamelCase ) snake_case__ : Dict = 4 snake_case__ : List[str] = 3 snake_case__ : Union[str, Any] = (32, 32) snake_case__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase ) with torch.no_grad(): snake_case__ : Tuple = model(__UpperCamelCase , __UpperCamelCase ).sample snake_case__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case__ : Optional[int] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) ) def __a ( self ) -> Tuple: '''simple docstring''' pass
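Stripped of the test scaffolding, every check above reduces to one forward pass: load a UNet, feed noise plus a timestep, and read the `.sample` field of the output. A sketch using the un-obfuscated diffusers class name and the dummy checkpoint the tests themselves load:

```python
import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()
noise = torch.randn(1, model.config.in_channels,
                    model.config.sample_size, model.config.sample_size)
timestep = torch.tensor([10])
with torch.no_grad():
    sample = model(noise, timestep).sample   # predicted denoised sample
print(sample.shape)
```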
143
0
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ lowerCamelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ lowerCamelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCamelCase ( datasets.Metric ): '''simple docstring''' def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : List[List[List[str]]] , _lowerCAmelCase : List[List[str]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCAmelCase , hypotheses=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase) }
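The metric class above is a thin wrapper around NLTK's `gleu_score.corpus_gleu`, forwarding the same keyword arguments its `_compute` receives. Calling NLTK directly, outside the `datasets` wrapper, looks like this:

```python
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
references = [[["the", "cat", "is", "on", "the", "mat"]]]  # one list of refs per hypothesis
score = gleu_score.corpus_gleu(
    list_of_references=references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))
```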
358
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


lowerCamelCase = logging.get_logger(__name__)

lowerCamelCase = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class _UpperCamelCase ( A ):
    '''simple docstring'''

    lowerCAmelCase__ = """bart"""
    lowerCAmelCase__ = ["""past_key_values"""]
    lowerCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : List[str] , _lowerCAmelCase : Any=5_0_2_6_5 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Any=4_0_9_6 , _lowerCAmelCase : List[str]=1_6 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Dict=4_0_9_6 , _lowerCAmelCase : Optional[Any]=1_6 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : str=1_0_2_4 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : str=2 , **_lowerCAmelCase : Optional[int] , ):
        '''simple docstring'''
        __lowercase =vocab_size
        __lowercase =max_position_embeddings
        __lowercase =d_model
        __lowercase =encoder_ffn_dim
        __lowercase =encoder_layers
        __lowercase =encoder_attention_heads
        __lowercase =decoder_ffn_dim
        __lowercase =decoder_layers
        __lowercase =decoder_attention_heads
        __lowercase =dropout
        __lowercase =attention_dropout
        __lowercase =activation_dropout
        __lowercase =activation_function
        __lowercase =init_std
        __lowercase =encoder_layerdrop
        __lowercase =decoder_layerdrop
        __lowercase =classifier_dropout
        __lowercase =use_cache
        __lowercase =encoder_layers
        __lowercase =scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowerCAmelCase):
            __lowercase =self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                'The config can simply be saved and uploaded again to be fixed.')


class _UpperCamelCase ( A ):
    '''simple docstring'''

    @property
    def __lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase =OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                __lowercase ={0: 'batch'}
                __lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                __lowercase ={0: 'batch', 1: 'decoder_sequence'}
                __lowercase ={0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __lowercase =OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                __lowercase , __lowercase =self.num_layers
                for i in range(_lowerCAmelCase):
                    __lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
                    __lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
        else:
            __lowercase =OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])

        return common_inputs

    @property
    def __lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase =super().outputs
        else:
            __lowercase =super(_lowerCAmelCase , self).outputs
            if self.use_past:
                __lowercase , __lowercase =self.num_layers
                for i in range(_lowerCAmelCase):
                    __lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
                    __lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
        '''simple docstring'''
        __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)

        # Generate decoder inputs
        __lowercase =seq_length if not self.use_past else 1
        __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
        __lowercase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        __lowercase =dict(**_lowerCAmelCase , **_lowerCAmelCase)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            __lowercase , __lowercase =common_inputs['input_ids'].shape
            __lowercase =common_inputs['decoder_input_ids'].shape[1]
            __lowercase , __lowercase =self.num_attention_heads
            __lowercase =(
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __lowercase =decoder_seq_length + 3
            __lowercase =(
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            __lowercase =torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase)] , dim=1)

            __lowercase =[]
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __lowercase , __lowercase =self.num_layers
            __lowercase =min(_lowerCAmelCase , _lowerCAmelCase)
            __lowercase =max(_lowerCAmelCase , _lowerCAmelCase) - min_num_layers
            __lowercase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(_lowerCAmelCase):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(_lowerCAmelCase),
                        torch.zeros(_lowerCAmelCase),
                        torch.zeros(_lowerCAmelCase),
                        torch.zeros(_lowerCAmelCase),
                    ))
            # TODO: test this.
            __lowercase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(_lowerCAmelCase , _lowerCAmelCase):
                common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)))
        return common_inputs

    def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
        '''simple docstring'''
        __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            __lowercase , __lowercase =common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            __lowercase =seqlen + 2
            __lowercase , __lowercase =self.num_layers
            __lowercase , __lowercase =self.num_attention_heads
            __lowercase =(
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            __lowercase =common_inputs['attention_mask'].dtype
            __lowercase =torch.cat(
                [common_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
            __lowercase =[
                (torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(_lowerCAmelCase)
            ]
        return common_inputs

    def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
        '''simple docstring'''
        __lowercase =compute_effective_axis_dimension(
            _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __lowercase =tokenizer.num_special_tokens_to_add(_lowerCAmelCase)
        __lowercase =compute_effective_axis_dimension(
            _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase)

        # Generate dummy inputs according to compute batch and sequence
        __lowercase =[' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        __lowercase =dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase))
        return common_inputs

    def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)

        elif self.task == "causal-lm":
            __lowercase =self._generate_dummy_inputs_for_causal_lm(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
        else:
            __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)

        return common_inputs

    def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any]):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase =super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
        else:
            __lowercase =super(_lowerCAmelCase , self)._flatten_past_key_values_(
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
48
0
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class _snake_case ( A__ ):
    def __lt__( self , a) -> Tuple:
        return self[-1] < other[-1]

    def __eq__( self , a) -> int:
        return self[-1] == other[-1]


def lowerCamelCase__ (_UpperCAmelCase):
    SCREAMING_SNAKE_CASE = []
    # sort into stacks
    for element in collection:
        SCREAMING_SNAKE_CASE = Stack([element])
        SCREAMING_SNAKE_CASE = bisect_left(_UpperCAmelCase , _UpperCAmelCase)
        if i != len(_UpperCAmelCase):
            stacks[i].append(_UpperCAmelCase)
        else:
            stacks.append(_UpperCAmelCase)
    # use a heap-based merge to merge stack efficiently
    SCREAMING_SNAKE_CASE = merge(*(reversed(_UpperCAmelCase) for stack in stacks))
    return collection


if __name__ == "__main__":
    a_ : Tuple = input('Enter numbers separated by a comma:\n').strip()
    a_ : Dict = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
137
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


a_ : int = logging.getLogger(__name__)


a_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class _snake_case :
    _lowercase : Optional[str] = field(
        default=A__ , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
                ''' scratch.'''
            )
        } , )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )


@dataclass
class _snake_case :
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={
            '''help''': (
                '''The input training data files (multiple files in glob format). '''
                '''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
            )
        } , )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
    _lowercase : Optional[str] = field(
        default=A__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
    _lowercase : bool = field(
        default=A__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
    _lowercase : bool = field(
        default=A__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
    _lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
    _lowercase : float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    _lowercase : float = field(
        default=1 / 6 , metadata={
            '''help''': (
                '''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
                ''' modeling.'''
            )
        } , )
    _lowercase : int = field(
        default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
    _lowercase : int = field(
        default=-1 , metadata={
            '''help''': (
                '''Optional input sequence length after tokenization.'''
                '''The training dataset will be truncated in block of this size for training.'''
                '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
            )
        } , )
    _lowercase : bool = field(
        default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
    def _dataset(_UpperCAmelCase , _UpperCAmelCase=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
            return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(_UpperCAmelCase) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file , args.train_ref_file)


def lowerCamelCase__ ():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.')

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
    else:
        SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name')

    if model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch')
        SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_config(_UpperCAmelCase)

    model.resize_token_embeddings(len(_UpperCAmelCase))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).')

    if data_args.block_size <= 0:
        SCREAMING_SNAKE_CASE = tokenizer.max_len  # Our input block size will be the max possible for the model
    else:
        SCREAMING_SNAKE_CASE = min(data_args.block_size , tokenizer.max_len)

    # Get datasets
    SCREAMING_SNAKE_CASE = (
        get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    SCREAMING_SNAKE_CASE = (
        get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        SCREAMING_SNAKE_CASE = DataCollatorForPermutationLanguageModeling(
            tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(
                tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability)
        else:
            SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
                tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    SCREAMING_SNAKE_CASE = Trainer(
        model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )

    # Training
    if training_args.do_train:
        SCREAMING_SNAKE_CASE = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=_UpperCAmelCase)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    SCREAMING_SNAKE_CASE = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        SCREAMING_SNAKE_CASE = trainer.evaluate()

        SCREAMING_SNAKE_CASE = math.exp(eval_output['eval_loss'])
        SCREAMING_SNAKE_CASE = {'perplexity': perplexity}

        SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(_UpperCAmelCase , 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s' , _UpperCAmelCase , str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(_UpperCAmelCase)

    return results


def lowerCamelCase__ (_UpperCAmelCase):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
137
1
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


UpperCamelCase : int = logging.get_logger(__name__)

UpperCamelCase : Dict = '▁'

UpperCamelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

UpperCamelCase : List[Any] = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

UpperCamelCase : Any = {'vinai/bartpho-syllable': 1_024}


class UpperCamelCase ( UpperCamelCase__ ):
    """simple docstring"""

    A : Any = VOCAB_FILES_NAMES
    A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A : Tuple = ["input_ids", "attention_mask"]

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : Union[str, Any]="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Dict="<unk>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : str = None , **UpperCAmelCase_ : Union[str, Any] , ):
        """simple docstring"""
        a : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token

        a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )

        a : int = vocab_file
        a : Dict = monolingual_vocab_file
        a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(__lowerCamelCase))

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        a : int = {}
        a : Optional[Any] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCamelCase) not in self.fairseq_tokens_to_ids:
                a : Any = cnt
                cnt += 1
        with open(__lowerCamelCase , 'r' , encoding='utf-8') as f:
            for line in f.readlines():
                a : str = line.strip().split()[0]
                a : Any = len(self.fairseq_tokens_to_ids)
        if str(__lowerCamelCase) not in self.fairseq_tokens_to_ids:
            a : Optional[Any] = len(self.fairseq_tokens_to_ids)

        a : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Optional[Any]):
        """simple docstring"""
        a : Tuple = self.__dict__.copy()
        a : int = None
        a : str = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : int , UpperCAmelCase_ : Dict):
        """simple docstring"""
        a : List[Any] = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            a : str = {}

        a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] = None):
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        a : Optional[Any] = [self.cls_token_id]
        a : List[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : List[str] = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase)

        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase)) + [1]
        return [1] + ([0] * len(__lowerCamelCase)) + [1, 1] + ([0] * len(__lowerCamelCase)) + [1]

    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple = None):
        """simple docstring"""
        a : Dict = [self.sep_token_id]
        a : Dict = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str):
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens)

    def SCREAMING_SNAKE_CASE_ ( self : Any):
        """simple docstring"""
        a : Dict = {self.convert_ids_to_tokens(__lowerCamelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Union[str, Any]):
        """simple docstring"""
        return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase)

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : int):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any]):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]

    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Tuple):
        """simple docstring"""
        a : Union[str, Any] = ''.join(__lowerCamelCase).replace(__lowerCamelCase , ' ').strip()
        return out_string

    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int = None):
        """simple docstring"""
        if not os.path.isdir(__lowerCamelCase):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        a : Union[str, Any] = os.path.join(
            __lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        a : Dict = os.path.join(
            __lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )

        if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , __lowerCamelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(__lowerCamelCase , 'wb') as fi:
                a : List[str] = self.sp_model.serialized_model_proto()
                fi.write(__lowerCamelCase)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            __lowerCamelCase) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file , __lowerCamelCase)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(__lowerCamelCase , 'w' , encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(__lowerCamelCase)} \n""")

        return out_vocab_file, out_monolingual_vocab_file
367
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class UpperCamelCase :
    """simple docstring"""

    def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
        """simple docstring"""
        if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            a : Dict = deepcopy(UpperCAmelCase_)
        elif os.path.exists(UpperCAmelCase_):
            with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
                a : Union[str, Any] = json.load(UpperCAmelCase_)
        else:
            try:
                a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
                a : List[str] = json.loads(UpperCAmelCase_)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")

        a : Optional[int] = config

        self.set_stage_and_offload()

    def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
        """simple docstring"""
        a : str = self.get_value('zero_optimization.stage' , -1)

        # offload
        a : Any = False
        if self.is_zeroa() or self.is_zeroa():
            a : Tuple = set(['cpu', 'nvme'])
            a : int = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device'),
                    self.get_value('zero_optimization.offload_param.device'),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                a : List[str] = True

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
        """simple docstring"""
        a : List[str] = self.config

        # find the config node of interest if it exists
        a : int = ds_key_long.split('.')
        a : Union[str, Any] = nodes.pop()
        for node in nodes:
            a : Union[str, Any] = config.get(UpperCAmelCase_)
            if config is None:
                return None, ds_key
        return config, ds_key

    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
        """simple docstring"""
        a , a : int = self.find_config_node(UpperCAmelCase_)
        if config is None:
            return default
        return config.get(UpperCAmelCase_ , UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
        """simple docstring"""
        a : Any = self.config

        # find the config node of interest if it exists
        a : Optional[Any] = ds_key_long.split('.')
        for node in nodes:
            a : List[str] = config
            a : int = config.get(UpperCAmelCase_)
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
        """simple docstring"""
        a : List[str] = self.get_value(UpperCAmelCase_)
        return False if value is None else bool(UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
        """simple docstring"""
        a : List[Any] = self.get_value(UpperCAmelCase_)
        return False if value is None else not bool(UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
        """simple docstring"""
        return self._stage == 2

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
        """simple docstring"""
        return self._stage == 3

    def SCREAMING_SNAKE_CASE_ ( self : Dict):
        """simple docstring"""
        return self._offload


class UpperCamelCase :
    """simple docstring"""

    def __init__( self : str , UpperCAmelCase_ : int):
        """simple docstring"""
        a : Union[str, Any] = engine

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
        """simple docstring"""
        self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class UpperCamelCase ( a_ ):
    """simple docstring"""

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
        """simple docstring"""
        super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
        a : List[str] = hasattr(self.optimizer , 'overflow')

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def SCREAMING_SNAKE_CASE_ ( self : Tuple):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict):
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class UpperCamelCase ( a_ ):
    """simple docstring"""

    def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
        """simple docstring"""
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ ( self : int):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class UpperCamelCase :
    """simple docstring"""

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
        """simple docstring"""
        a : int = params
        a : str = lr
        a : Tuple = weight_decay
        a : Dict = kwargs


class UpperCamelCase :
    """simple docstring"""

    def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
        """simple docstring"""
        a : str = optimizer
        a : Tuple = total_num_steps
        a : Optional[Any] = warmup_num_steps
        a : List[str] = kwargs
345
0
import re

from filelock import FileLock


try:
    import nltk

    A : Dict = True
except (ImportError, ModuleNotFoundError):
    A : Any = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def UpperCamelCase ( __magic_name__ : str ) -> str:
    """simple docstring"""
    re.sub("""<n>""" , """""" , __magic_name__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__magic_name__ ) )
305
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
    """simple docstring"""
    return x + 2


class A ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCamelCase__ (self : Optional[Any] ) -> Any:
        """simple docstring"""
        lowercase__ = """x = 3"""
        lowercase__ = {}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        assert result == 3
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )

        lowercase__ = """x = y"""
        lowercase__ = {"""y""": 5}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 5, """y""": 5} )

    def lowerCamelCase__ (self : str ) -> Optional[Any]:
        """simple docstring"""
        lowercase__ = """y = add_two(x)"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )

        # Won't work without the tool
        with CaptureStdout() as out:
            lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        assert result is None
        assert "tried to execute add_two" in out.out

    def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        lowercase__ = """x = 3"""
        lowercase__ = {}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        assert result == 3
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )

    def lowerCamelCase__ (self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )

    def lowerCamelCase__ (self : List[str] ) -> List[Any]:
        """simple docstring"""
        lowercase__ = """x = 3\ny = 5"""
        lowercase__ = {}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )

    def lowerCamelCase__ (self : List[Any] ) -> Dict:
        """simple docstring"""
        lowercase__ = """text = f'This is x: {x}.'"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """text""": """This is x: 3."""} )

    def lowerCamelCase__ (self : List[str] ) -> int:
        """simple docstring"""
        lowercase__ = """if x <= 3:\n y = 2\nelse:\n y = 5"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 2} )

        lowercase__ = {"""x""": 8}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 8, """y""": 5} )

    def lowerCamelCase__ (self : Dict ) -> int:
        """simple docstring"""
        lowercase__ = """test_list = [x, add_two(x)]"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , [3, 5] )
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )

    def lowerCamelCase__ (self : Any ) -> int:
        """simple docstring"""
        lowercase__ = """y = x"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
        assert result == 3
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 3} )

    def lowerCamelCase__ (self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        lowercase__ = """test_list = [x, add_two(x)]\ntest_list[1]"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )

        lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        lowercase__ = {"""x""": 3}
        lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
        assert result == 5
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )

    def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
        """simple docstring"""
        lowercase__ = """x = 0\nfor i in range(3):\n x = i"""
        lowercase__ = {}
        lowercase__ = evaluate(_UpperCAmelCase , {"""range""": range} , state=_UpperCAmelCase )
        assert result == 2
        self.assertDictEqual(_UpperCAmelCase , {"""x""": 2, """i""": 2} )
305
1
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

snake_case__ = logging.get_logger(__name__)


snake_case__ = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

snake_case__ = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}

snake_case__ = {"""facebook/blenderbot-3B""": 1_28}


class UpperCamelCase_ (a__ ):
    """simple docstring"""

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = ['input_ids', 'attention_mask']
    _lowerCAmelCase = BlenderbotTokenizer

    def __init__( self : Dict , _lowerCamelCase : List[str]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=None , _lowerCamelCase : List[str]="replace" , _lowerCamelCase : str="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[int]="</s>" , _lowerCamelCase : List[str]="<s>" , _lowerCamelCase : Optional[Any]="<unk>" , _lowerCamelCase : str="<pad>" , _lowerCamelCase : Optional[int]="<mask>" , _lowerCamelCase : Dict=False , _lowerCamelCase : Union[str, Any]=True , **_lowerCamelCase : List[Any] , ):
        """simple docstring"""
        super().__init__(
            _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
        A_ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
            A_ : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
            A_ : Any = add_prefix_space
            A_ : Optional[Any] = pre_tok_class(**_lowerCamelCase )
        A_ : Tuple = add_prefix_space

        A_ : int = '''post_processor'''
        A_ : Union[str, Any] = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
        if tokenizer_component_instance:
            A_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                A_ : Dict = tuple(state['''sep'''] )
            if "cls" in state:
                A_ : Dict = tuple(state['''cls'''] )

            A_ : Optional[int] = False

            if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
                A_ : Any = add_prefix_space
                A_ : Union[str, Any] = True

            if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
                A_ : Dict = trim_offsets
                A_ : List[Any] = True

            if changes_to_apply:
                A_ : int = getattr(_lowerCamelCase , state.pop('''type''' ) )
                A_ : List[Any] = component_class(**_lowerCamelCase )
                setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] ):
        """simple docstring"""
        A_ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
        A_ : str = value

    def _a ( self : int , *_lowerCamelCase : str , **_lowerCamelCase : Any ):
        """simple docstring"""
        A_ : List[str] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )

    def _a ( self : Optional[Any] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ):
        """simple docstring"""
        A_ : Any = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )

    def _a ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
        """simple docstring"""
        A_ : Tuple = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
        return tuple(_lowerCamelCase )

    def _a ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        """simple docstring"""
        A_ : int = [self.sep_token_id]
        A_ : List[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        """simple docstring"""
        return token_ids_a + [self.eos_token_id]

    def _a ( self : List[Any] , _lowerCamelCase : "Conversation" ):
        """simple docstring"""
        A_ : Optional[int] = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(_lowerCamelCase )

        A_ : Optional[int] = '''  '''.join(_lowerCamelCase )
        A_ : List[str] = self.encode(_lowerCamelCase )
        if len(_lowerCamelCase ) > self.model_max_length:
            A_ : int = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
        return input_ids
361
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class UpperCamelCase_ (a__ ):
    """simple docstring"""

    _lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
    _lowerCAmelCase = 'image_segmenter'
    _lowerCAmelCase = CLIPSegForImageSegmentation

    _lowerCAmelCase = ['image', 'text']
    _lowerCAmelCase = ['image']

    def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*_lowerCamelCase , **_lowerCamelCase )

    def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )

    def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
        """simple docstring"""
        with torch.no_grad():
            A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
        return logits

    def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
        """simple docstring"""
        A_ : int = outputs.cpu().detach().numpy()
        A_ : Tuple = 0
        A_ : List[str] = 1
        return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring'''
from ....utils import logging


lowerCamelCase : Optional[Any] = logging.get_logger(__name__)


class __lowerCAmelCase (lowercase_ ):
    '''simple docstring'''

    def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ):
        '''simple docstring'''
        lowercase__ = config.__dict__
        lowercase__ = modal_hidden_size

        if num_labels:
            lowercase__ = num_labels
2
"""simple docstring""" import math import random def lowercase_ ( _lowerCamelCase: float , _lowerCamelCase: bool = False ) -> float: '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __A = 0.02 def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: int ) -> float: '''simple docstring''' __lowerCamelCase : Tuple = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(_lowerCamelCase ): # Forward propagation __lowerCamelCase : List[Any] = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __lowerCamelCase : Any = (expected / 100) - layer_a # Error delta __lowerCamelCase : Dict = layer_1_error * sigmoid_function(_lowerCamelCase , _lowerCamelCase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __A = int(input('''Expected value: ''')) __A = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
135
0
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
241
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
241
1