Dataset schema (one row = one code / style-context pair):

  code                     string  (lengths 86 – 54.5k)
  code_codestyle           int64   (0 – 371)
  style_context            string  (lengths 87 – 49.2k)
  style_context_codestyle  int64   (0 – 349)
  label                    int64   (0 – 1)
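A minimal sketch of loading rows with this schema via the datasets library; the repository path below is a placeholder, not a real dataset name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])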
"""simple docstring""" from collections import defaultdict def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' __UpperCAmelCase : Any = 1 __UpperCAmelCase : Dict = True for v in tree[start]: if v not in visited: ret += dfs(_UpperCamelCase ) if ret % 2 == 0: cuts.append(_UpperCamelCase ) return ret def lowerCamelCase ( ) -> Tuple: '''simple docstring''' dfs(1 ) if __name__ == "__main__": UpperCAmelCase : Dict = 10, 9 UpperCAmelCase : Union[str, Any] = defaultdict(list) UpperCAmelCase : dict[int, bool] = {} UpperCAmelCase : list[int] = [] UpperCAmelCase : Dict = 0 UpperCAmelCase : Dict = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
115
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer approach on the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
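A hand-verifiable hedged check of both functions (5 + 7 + 23 = 35):

assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)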
336
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): '''simple docstring''' def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: int=10 , _SCREAMING_SNAKE_CASE: Tuple=18 , _SCREAMING_SNAKE_CASE: Union[str, Any]=30 , _SCREAMING_SNAKE_CASE: Any=400 , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Any=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Dict=None , ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = size if size is not None else {"shortest_edge": 18} __lowerCAmelCase : int = crop_size if crop_size is not None else {"height": 18, "width": 18} __lowerCAmelCase : Tuple = parent __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : List[str] = num_channels __lowerCAmelCase : int = num_frames __lowerCAmelCase : Union[str, Any] = image_size __lowerCAmelCase : Tuple = min_resolution __lowerCAmelCase : Tuple = max_resolution __lowerCAmelCase : str = do_resize __lowerCAmelCase : Optional[int] = size __lowerCAmelCase : Optional[int] = do_normalize __lowerCAmelCase : Dict = image_mean __lowerCAmelCase : List[Any] = image_std __lowerCAmelCase : List[Any] = crop_size def _SCREAMING_SNAKE_CASE ( self: int) -> Union[str, Any]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = VivitImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[int] = VivitImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_mean")) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_std")) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_normalize")) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize")) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_center_crop")) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size")) def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18}) __lowerCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"shortest_edge": 
42}) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84}) def _SCREAMING_SNAKE_CASE ( self: int) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PIL videos __lowerCAmelCase : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE) for video in video_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertIsInstance(video[0] , Image.Image) # Test not batched input __lowerCAmelCase : Any = image_processing(video_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> int: """simple docstring""" __lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE) for video in video_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertIsInstance(video[0] , np.ndarray) # Test not batched input __lowerCAmelCase : Any = image_processing(video_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase : List[str] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _SCREAMING_SNAKE_CASE ( self: Dict) -> int: """simple docstring""" __lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE) for video in video_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertIsInstance(video[0] , torch.Tensor) # Test not batched input __lowerCAmelCase : List[str] = image_processing(video_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase : Any = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
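Outside the test harness, the processor is used like any transformers image processor. A hedged usage sketch (the checkpoint name is the public ViViT Kinetics-400 release; treat it as an assumption):

import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")  # assumed checkpoint
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(32)]
inputs = processor(video, return_tensors="pt")
print(inputs.pixel_values.shape)  # (1, num_frames, num_channels, crop_height, crop_width)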
351
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
58
0
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
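The schedule tests above step each scheduler ten times and compare learning rates against hard-coded expectations. A minimal hedged sketch of driving one of those schedulers outside the test harness (using torch's AdamW rather than the deprecated transformers one):

import torch
from torch import nn
from transformers import get_linear_schedule_with_warmup

model = nn.Linear(50, 50)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()
print(lrs)  # 0.0, 5.0, 10.0, 8.75, 7.5, ... matching the expected values above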
329
1
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
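Worked check: C(4, 2) = 6, 0.75² = 0.5625, 0.25² = 0.0625, so the printed probability is 6 × 0.5625 × 0.0625 = 0.2109375.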
147
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) lowerCAmelCase : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class a_ ( _lowerCAmelCase ): __A = "gpt_neo" __A = ["past_key_values"] __A = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , lowercase : Tuple=50_257 , lowercase : Optional[Any]=2_048 , lowercase : Union[str, Any]=2_048 , lowercase : int=24 , lowercase : Optional[Any]=[[["global", "local"], 12]] , lowercase : List[Any]=16 , lowercase : List[str]=None , lowercase : Union[str, Any]=256 , lowercase : Optional[Any]="gelu_new" , lowercase : Any=0.0 , lowercase : List[Any]=0.0 , lowercase : Any=0.0 , lowercase : str=0.1 , lowercase : Dict=1e-5 , lowercase : List[str]=0.02 , lowercase : Union[str, Any]=True , lowercase : int=50_256 , lowercase : Union[str, Any]=50_256 , **lowercase : Dict , ): """simple docstring""" lowercase_ :str = vocab_size lowercase_ :Tuple = max_position_embeddings lowercase_ :Tuple = hidden_size lowercase_ :List[str] = num_layers lowercase_ :int = num_heads lowercase_ :Union[str, Any] = intermediate_size lowercase_ :Tuple = window_size lowercase_ :Any = activation_function lowercase_ :Tuple = resid_dropout lowercase_ :Any = embed_dropout lowercase_ :str = attention_dropout lowercase_ :List[str] = classifier_dropout lowercase_ :List[Any] = layer_norm_epsilon lowercase_ :List[str] = initializer_range lowercase_ :int = use_cache lowercase_ :Tuple = bos_token_id lowercase_ :Optional[Any] = eos_token_id lowercase_ :int = attention_types lowercase_ :Tuple = self.expand_attention_types_params(lowercase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) @staticmethod def lowercase__ ( lowercase : str ): """simple docstring""" lowercase_ :Union[str, Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Dict ): import torch lowercase_ :List[str] = input.size() lowercase_ :Union[str, Any] = len(__lowerCamelCase ) lowercase_ :Any = shape[dimension] lowercase_ :str = torch.arange(0 ,__lowerCamelCase ,__lowerCamelCase ) lowercase_ :Union[str, Any] = torch.div(sizedim - size ,__lowerCamelCase ,rounding_mode="floor" ) + 1 lowercase_ :int = torch.arange(__lowerCamelCase ) + low_indices[:min_length][:, None] lowercase_ :List[Any] = [slice(__lowerCamelCase )] * rank lowercase_ :int = indices lowercase_ :Dict = input[s] lowercase_ :List[str] = list(range(0 ,rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowerCamelCase ) def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Any ): import torch lowercase_ :List[Any] = torch.arange(1 ,__lowerCamelCase ) lowercase_ :int = torch.remainder(__lowerCamelCase ,__lowerCamelCase ) lowercase_ :Optional[int] = remainders == 0 lowercase_ :int = candidates[divisor_indices] lowercase_ :Tuple = torch.max(__lowerCamelCase ) return largest_divisor, torch.div(__lowerCamelCase ,__lowerCamelCase ,rounding_mode="floor" ) class a_ ( _lowerCAmelCase ): @property def lowercase__ ( self : str ): """simple docstring""" lowercase_ :int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="inputs" ) lowercase_ :Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"} else: lowercase_ :str = {0: "batch", 1: "sequence"} return common_inputs @property def lowercase__ ( self : Tuple ): """simple docstring""" return self._config.num_heads def lowercase__ ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): """simple docstring""" lowercase_ :List[str] = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() lowercase_ :Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase_ , lowercase_ :Tuple = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowercase_ :Any = seqlen + 2 lowercase_ :List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase_ :Dict = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] lowercase_ :Tuple = common_inputs["attention_mask"] if self.use_past: lowercase_ :Optional[int] = ordered_inputs["attention_mask"].dtype lowercase_ :List[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : int ): """simple docstring""" return 13
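A minimal hedged check of the configuration class above, relying only on attributes visible in its __init__ (the defaults give 24 layers with alternating global/local attention):

from transformers import GPTNeoConfig

config = GPTNeoConfig()
print(config.num_layers)            # 24
print(config.attention_layers[:4])  # ['global', 'local', 'global', 'local']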
147
1
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the intensity transmitted through a polarizer at the given angle (degrees)."""
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")  # reject negative intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")  # reject out-of-range angles
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
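Worked check: at 60° the transmitted fraction is cos²(60°) = 0.25, so malus_law(100.0, 60.0) ≈ 25.0 (up to floating-point rounding).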
82
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the post-order of vertices reachable from `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect one strongly connected component by DFS over the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: post-order pass, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
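A hedged check against the two sample graphs at the top of the file (ordering inside each component depends on traversal order):

print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]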
82
1
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable`, via multiprocessing or a joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Temporarily route parallel map operations through the named joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
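A hedged usage sketch for the context manager above, mirroring the "spark" special case it contains (requires joblib and joblibspark; the body is elided):

from datasets.parallel import parallel_backend

with parallel_backend("spark"):
    ...  # num_proc-based datasets operations in here are dispatched through joblib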
359
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; the key can be set here or passed per call."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
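A quick hedged round trip with the cipher above:

crypt = XORCipher()
secret = crypt.encrypt_string("hallo welt", 67)
assert crypt.decrypt_string(secret, 67) == "hallo welt"  # XOR with the same key twice is the identity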
264
0
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` matches the Sri Lankan mobile number format."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # country prefix: 0, 94, +94 or 0094
        r"7(0|1|2|4|5|6|7|8)"  # operator code 70-78 (73 excluded)
        r"(-| |)"  # optional separator
        r"\d{7}$"  # seven-digit subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
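Hand-checkable spot checks for the validator above:

assert is_sri_lankan_phone_number("0094702343221")      # 0094 prefix + operator 70 + 7 digits
assert not is_sri_lankan_phone_number("0094732343221")  # 73 is not an accepted operator code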
8
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Return the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
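Worked check: for inductance 10 and capacitance 5 (SI units), sqrt(L·C) = sqrt(50) ≈ 7.071, so the function returns ('Resonant frequency', ≈0.0225), i.e. 1/(2π·7.071).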
8
1
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
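Hedged trace for twos_complement(-5): bin(-5)[3:] is '101' (length 3), abs(-5) − (1 << 3) = −3 whose bin()[3:] is '11', and after the '1' prefix and zero padding the result is '0b1011'.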
227
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
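The heavy lifting is the three-argument pow, which performs modular exponentiation instead of materialising the roughly 2.4-million-digit power. The same trick in miniature:

print(pow(3, 2024, 10**4))  # last four digits of 3**2024, in O(log exponent) multiplications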
227
1
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = TypeVar("""DatasetType""", Dataset, IterableDataset) def UpperCamelCase_ ( _UpperCAmelCase : List[DatasetType] , _UpperCAmelCase : Optional[List[float]] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[DatasetInfo] = None , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(_UpperCAmelCase ): if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ): if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_UpperCAmelCase ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.""" ) if i == 0: _UpperCAmelCase , _UpperCAmelCase : Dict = ( (Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase ) else: return _interleave_iterable_datasets( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : List[DatasetType] , _UpperCAmelCase : Optional[DatasetInfo] = None , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." 
) for i, dataset in enumerate(_UpperCAmelCase ): if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ): if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_UpperCAmelCase ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.""" ) if i == 0: _UpperCAmelCase , _UpperCAmelCase : Dict = ( (Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase ) else: return _concatenate_iterable_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
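Both helpers above are the public datasets API; a minimal hedged usage sketch:

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

mixed = interleave_datasets([d1, d2])     # rows alternate: [0, 10, 1, 11, 2, 12]
stacked = concatenate_datasets([d1, d2])  # rows appended: [0, 1, 2, 10, 11, 12]
print(mixed["a"], stacked["a"])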
31
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively."""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the `node_count`-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labelled binary trees: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
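Hedged check for five nodes: catalan_number(5) = C(10, 5) // 6 = 252 // 6 = 42 binary search trees, and binary_tree_count(5) = 42 × 5! = 5040 binary trees, matching what the script prints.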
31
1
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowercase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ): '''simple docstring''' def __init__( self , _snake_case=None , **_snake_case ) -> int: """simple docstring""" super().__init__(features=_snake_case ) UpperCAmelCase = torch_tensor_kwargs import torch # noqa import torch at initialization def snake_case_ ( self , _snake_case ) -> Union[str, Any]: """simple docstring""" import torch if isinstance(_snake_case , _snake_case ) and column: if all( isinstance(_snake_case , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(_snake_case ) return column def snake_case_ ( self , _snake_case ) -> Optional[int]: """simple docstring""" import torch if isinstance(_snake_case , (str, bytes, type(_snake_case )) ): return value elif isinstance(_snake_case , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() UpperCAmelCase = {} if isinstance(_snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): UpperCAmelCase = {'''dtype''': torch.intaa} elif isinstance(_snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): UpperCAmelCase = {'''dtype''': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(_snake_case , PIL.Image.Image ): UpperCAmelCase = np.asarray(_snake_case ) return torch.tensor(_snake_case , **{**default_dtype, **self.torch_tensor_kwargs} ) def snake_case_ ( self , _snake_case ) -> Optional[Any]: """simple docstring""" import torch # support for torch, tf, jax etc. 
if hasattr(_snake_case , '''__array__''' ) and not isinstance(_snake_case , torch.Tensor ): UpperCAmelCase = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(_snake_case , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] ) elif isinstance(_snake_case , (list, tuple) ): return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] ) return self._tensorize(_snake_case ) def snake_case_ ( self , _snake_case ) -> List[Any]: """simple docstring""" return map_nested(self._recursive_tensorize , _snake_case , map_list=_snake_case ) def snake_case_ ( self , _snake_case ) -> Mapping: """simple docstring""" UpperCAmelCase = self.numpy_arrow_extractor().extract_row(_snake_case ) UpperCAmelCase = self.python_features_decoder.decode_row(_snake_case ) return self.recursive_tensorize(_snake_case ) def snake_case_ ( self , _snake_case ) -> "torch.Tensor": """simple docstring""" UpperCAmelCase = self.numpy_arrow_extractor().extract_column(_snake_case ) UpperCAmelCase = self.python_features_decoder.decode_column(_snake_case , pa_table.column_names[0] ) UpperCAmelCase = self.recursive_tensorize(_snake_case ) UpperCAmelCase = self._consolidate(_snake_case ) return column def snake_case_ ( self , _snake_case ) -> Mapping: """simple docstring""" UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(_snake_case ) UpperCAmelCase = self.python_features_decoder.decode_batch(_snake_case ) UpperCAmelCase = self.recursive_tensorize(_snake_case ) for column_name in batch: UpperCAmelCase = self._consolidate(batch[column_name] ) return batch
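This formatter is what backs Dataset.with_format("torch"). A minimal hedged sketch:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>, produced by the torch formatter above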
358
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
152
0
"""simple docstring""" lowerCamelCase_ = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
268
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''rwkv''' __magic_name__ = {'''max_position_embeddings''': '''context_length'''} def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]: UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[str] = context_length UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCAmelCase_ : Any = layer_norm_epsilon UpperCAmelCase_ : List[Any] = rescale_every UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : List[str] = bos_token_id UpperCAmelCase_ : Union[str, Any] = eos_token_id super().__init__( tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
268
1
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve that lazily yields primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite: move its factor forward to the next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # prime is prime: its square is the first composite it produces
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
367
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
82
0
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = args.log_outputs _lowerCAmelCase = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric _lowerCAmelCase = load_metric("""wer""" ) _lowerCAmelCase = load_metric("""cer""" ) # compute metrics _lowerCAmelCase = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) _lowerCAmelCase = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results _lowerCAmelCase = F'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(F'{dataset_id}_eval_results.txt' , """w""" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: _lowerCAmelCase = F'log_{dataset_id}_predictions.txt' _lowerCAmelCase = F'log_{dataset_id}_targets.txt' with open(snake_case , """w""" ) as p, open(snake_case , """w""" ) as t: # mapping function to write output def write_to_file(snake_case , snake_case ): p.write(F'{i}' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(F'{i}' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(snake_case , with_indices=snake_case ) def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training _lowerCAmelCase = re.sub(snake_case , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! _lowerCAmelCase = ["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: _lowerCAmelCase = """ """.join(text.split(snake_case ) ) return text def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(args.model_id ) _lowerCAmelCase = feature_extractor.sampling_rate # resample audio _lowerCAmelCase = dataset.cast_column("""audio""" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: _lowerCAmelCase = 0 if torch.cuda.is_available() else -1 _lowerCAmelCase = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case ): _lowerCAmelCase = asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) _lowerCAmelCase = prediction["""text"""] _lowerCAmelCase = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples _lowerCAmelCase = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) A__ = parser.parse_args() main(args)
82
'''simple docstring'''


def solution(length: int = 50) -> int:
    # ways_number[i] counts the arrangements for a row of length i,
    # built up with dynamic programming over block length and position.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
145
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
229
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _lowercase = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS) _lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` _lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") _lowercase = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def A (__lowerCamelCase :str ): _lowerCAmelCase = None # source code of `config_class` _lowerCAmelCase = inspect.getsource(__lowerCamelCase ) _lowerCAmelCase = _re_checkpoint.findall(__lowerCamelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("""/""" ): _lowerCAmelCase = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link _lowerCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _lowerCAmelCase = ckpt_name break return checkpoint def A (): _lowerCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue _lowerCAmelCase = get_checkpoint_from_config_class(__lowerCamelCase ) _lowerCAmelCase = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: _lowerCAmelCase = """\n""".join(sorted(__lowerCamelCase ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
229
1
"""simple docstring""" import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging _lowercase : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'linear' _a = 'cosine' _a = 'cosine_with_restarts' _a = 'polynomial' _a = 'constant' _a = 'constant_with_warmup' _a = 'piecewise_constant' def snake_case__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int = -1 ): """simple docstring""" return LambdaLR(__lowerCamelCase , lambda __lowerCamelCase : 1 , last_epoch=__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int = -1 ): """simple docstring""" def lr_lambda(__lowerCamelCase : int ): if current_step < num_warmup_steps: return float(__lowerCamelCase ) / float(max(1.0 , __lowerCamelCase ) ) return 1.0 return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : str , __lowerCamelCase : int = -1 ): """simple docstring""" lowerCamelCase__ : List[Any] ={} lowerCamelCase__ : Tuple =step_rules.split(''',''' ) for rule_str in rule_list[:-1]: lowerCamelCase__ , lowerCamelCase__ : List[Any] =rule_str.split(''':''' ) lowerCamelCase__ : Any =int(__lowerCamelCase ) lowerCamelCase__ : int =float(__lowerCamelCase ) lowerCamelCase__ : Tuple =value lowerCamelCase__ : Union[str, Any] =float(rule_list[-1] ) def create_rules_function(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ): def rule_func(__lowerCamelCase : int ) -> float: lowerCamelCase__ : Optional[int] =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__lowerCamelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowerCamelCase__ : Any =create_rules_function(__lowerCamelCase , __lowerCamelCase ) return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=-1 ): """simple docstring""" def lr_lambda(__lowerCamelCase : int ): if current_step < num_warmup_steps: return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0.5 , __lowerCamelCase : int = -1 ): """simple docstring""" def lr_lambda(__lowerCamelCase : Dict ): if current_step < num_warmup_steps: return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) ) lowerCamelCase__ : Tuple =float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCamelCase ) * 2.0 * progress )) ) return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 ): """simple docstring""" def lr_lambda(__lowerCamelCase : Union[str, Any] ): if current_step < num_warmup_steps: return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase 
) ) lowerCamelCase__ : Dict =float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCamelCase ) * progress) % 1.0) )) ) return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str]=1e-7 , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Any=-1 ): """simple docstring""" lowerCamelCase__ : List[Any] =optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(__lowerCamelCase : int ): if current_step < num_warmup_steps: return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowerCamelCase__ : Optional[int] =lr_init - lr_end lowerCamelCase__ : Union[str, Any] =num_training_steps - num_warmup_steps lowerCamelCase__ : Optional[Any] =1 - (current_step - num_warmup_steps) / decay_steps lowerCamelCase__ : str =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _lowercase : Any = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def snake_case__ ( __lowerCamelCase : Union[str, SchedulerType] , __lowerCamelCase : Optimizer , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : int = -1 , ): """simple docstring""" lowerCamelCase__ : int =SchedulerType(__lowerCamelCase ) lowerCamelCase__ : Dict =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__lowerCamelCase , last_epoch=__lowerCamelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__lowerCamelCase , step_rules=__lowerCamelCase , last_epoch=__lowerCamelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__lowerCamelCase , num_warmup_steps=__lowerCamelCase , last_epoch=__lowerCamelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , num_cycles=__lowerCamelCase , last_epoch=__lowerCamelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , power=__lowerCamelCase , last_epoch=__lowerCamelCase , ) return schedule_func( 
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
238
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType _lowercase : List[str] = logging.get_logger(__name__) _lowercase : int = { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json" ), } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'deberta-v2' def __init__( self : Optional[Any], lowerCamelCase : Optional[int]=12_8100, lowerCamelCase : List[Any]=1536, lowerCamelCase : Dict=24, lowerCamelCase : Any=24, lowerCamelCase : Union[str, Any]=6144, lowerCamelCase : List[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Union[str, Any]=512, lowerCamelCase : Optional[Any]=0, lowerCamelCase : Any=0.02, lowerCamelCase : int=1E-7, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Union[str, Any]=-1, lowerCamelCase : Tuple=0, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : int=None, lowerCamelCase : Dict=0, lowerCamelCase : Tuple="gelu", **lowerCamelCase : Optional[int], )-> Union[str, Any]: super().__init__(**lowerCamelCase ) lowerCamelCase__ : str =hidden_size lowerCamelCase__ : Optional[int] =num_hidden_layers lowerCamelCase__ : Optional[Any] =num_attention_heads lowerCamelCase__ : List[Any] =intermediate_size lowerCamelCase__ : int =hidden_act lowerCamelCase__ : Tuple =hidden_dropout_prob lowerCamelCase__ : Union[str, Any] =attention_probs_dropout_prob lowerCamelCase__ : Optional[Any] =max_position_embeddings lowerCamelCase__ : int =type_vocab_size lowerCamelCase__ : Tuple =initializer_range lowerCamelCase__ : Tuple =relative_attention lowerCamelCase__ : Optional[Any] =max_relative_positions lowerCamelCase__ : List[Any] =pad_token_id lowerCamelCase__ : int =position_biased_input # Backwards compatibility if type(lowerCamelCase ) == str: lowerCamelCase__ : Union[str, Any] =[x.strip() for x in pos_att_type.lower().split('''|''' )] lowerCamelCase__ : Tuple =pos_att_type lowerCamelCase__ : Union[str, Any] =vocab_size lowerCamelCase__ : Optional[int] =layer_norm_eps lowerCamelCase__ : Dict =kwargs.get('''pooler_hidden_size''', lowerCamelCase ) lowerCamelCase__ : Tuple =pooler_dropout lowerCamelCase__ : List[Any] =pooler_hidden_act class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' @property def snake_case ( self : List[str] )-> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCamelCase__ : Union[str, Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCamelCase__ : Any ={0: '''batch''', 1: '''sequence'''} if self._config.type_vocab_size > 0: return OrderedDict( [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] ) else: return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] ) @property def snake_case ( self : List[str] )-> int: return 12 def snake_case ( self : str, lowerCamelCase : 
Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional["TensorType"] = None, lowerCamelCase : int = 3, lowerCamelCase : int = 40, lowerCamelCase : int = 40, lowerCamelCase : "PreTrainedTokenizerBase" = None, )-> Mapping[str, Any]: lowerCamelCase__ : List[Any] =super().generate_dummy_inputs(preprocessor=lowerCamelCase, framework=lowerCamelCase ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
238
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
191
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase : int = logging.get_logger(__name__) _lowerCamelCase : Optional[Any] = '''▁''' _lowerCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowerCamelCase : int = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowerCamelCase : Optional[Any] = { '''xlm-roberta-base''': 5_1_2, '''xlm-roberta-large''': 5_1_2, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_1_2, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_1_2, '''xlm-roberta-large-finetuned-conll03-english''': 5_1_2, '''xlm-roberta-large-finetuned-conll03-german''': 5_1_2, } class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = VOCAB_FILES_NAMES UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ = ["input_ids", "attention_mask"] def __init__( self : Dict, _UpperCAmelCase : str, _UpperCAmelCase : Optional[int]="<s>", _UpperCAmelCase : Optional[int]="</s>", _UpperCAmelCase : Dict="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Union[str, Any]="<unk>", _UpperCAmelCase : List[Any]="<pad>", _UpperCAmelCase : str="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : List[Any], ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ : int = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token SCREAMING_SNAKE_CASE__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE__ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE__ : Dict = 1 SCREAMING_SNAKE_CASE__ : int = len(self.sp_model ) + self.fairseq_offset SCREAMING_SNAKE_CASE__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self : int, _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = d # for backward compatibility if not hasattr(self, "sp_model_kwargs" ): SCREAMING_SNAKE_CASE__ : Dict = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def A_ ( self : Any, _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A_ ( self : List[Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def A_ ( self : Union[str, Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A_ ( self : List[str] ) -> List[str]: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def A_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A_ ( self : List[str], _UpperCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase ) def A_ ( self : Optional[Any], _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A_ ( self : Tuple, _UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] 
return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A_ ( self : Any, _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip() return out_string def A_ ( self : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase, "wb" ) as fi: SCREAMING_SNAKE_CASE__ : Any = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
191
1
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(__snake_case , __snake_case ) def UpperCAmelCase_ ( self ): with tempfile.TemporaryDirectory() as tmpdirname: _SCREAMING_SNAKE_CASE : List[str] = Path(__snake_case ) / """preprocessor_config.json""" _SCREAMING_SNAKE_CASE : Tuple = Path(__snake_case ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) ) _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def UpperCAmelCase_ ( self ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _SCREAMING_SNAKE_CASE : List[str] = Path(__snake_case ) / """preprocessor_config.json""" _SCREAMING_SNAKE_CASE : Tuple = Path(__snake_case ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) ) _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def UpperCAmelCase_ ( self ): with tempfile.TemporaryDirectory() as tmpdirname: _SCREAMING_SNAKE_CASE : Optional[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type _SCREAMING_SNAKE_CASE : int = Path(__snake_case ) / """preprocessor_config.json""" _SCREAMING_SNAKE_CASE : List[Any] = Path(__snake_case ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(__snake_case ).to_dict() config_dict.pop("""image_processor_type""" ) _SCREAMING_SNAKE_CASE : List[str] = CLIPImageProcessor(**__snake_case ) # save in new folder model_config.save_pretrained(__snake_case ) config.save_pretrained(__snake_case ) _SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(__snake_case ) # make sure private variable is not incorrectly saved _SCREAMING_SNAKE_CASE : List[str] = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(__snake_case , __snake_case ) def UpperCAmelCase_ ( self ): with tempfile.TemporaryDirectory() as tmpdirname: _SCREAMING_SNAKE_CASE 
: str = Path(__snake_case ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , ) _SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def UpperCAmelCase_ ( self ): with self.assertRaisesRegex( __snake_case , """clip-base is not a local folder and is not a valid model identifier""" ): _SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained("""clip-base""" ) def UpperCAmelCase_ ( self ): with self.assertRaisesRegex( __snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" ) def UpperCAmelCase_ ( self ): with self.assertRaisesRegex( __snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): _SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def UpperCAmelCase_ ( self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__snake_case ): _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__snake_case ): _SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case ) _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__snake_case ) _SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def UpperCAmelCase_ ( self ): try: AutoConfig.register("""custom""" , __snake_case ) AutoImageProcessor.register(__snake_case , __snake_case ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__snake_case ): AutoImageProcessor.register(__snake_case , __snake_case ) with tempfile.TemporaryDirectory() as tmpdirname: _SCREAMING_SNAKE_CASE : Any = Path(__snake_case ) / """preprocessor_config.json""" _SCREAMING_SNAKE_CASE : str = Path(__snake_case ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) ) _SCREAMING_SNAKE_CASE : List[str] = CustomImageProcessor.from_pretrained(__snake_case ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__snake_case ) _SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ ( self ): class lowercase__ ( _snake_case ): '''simple docstring''' A_ : Dict = True try: AutoConfig.register("""custom""" , __snake_case ) AutoImageProcessor.register(__snake_case , __snake_case ) # If remote code is not set, the default is to use local _SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(__snake_case , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
200
"""Singly linked list. Identifier placeholders from the dump are restored from the
call sites (Node, LinkedList, insert_nth, delete_nth, reverse, ...) so the file runs."""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
200
1
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] ) -> Dict: '''simple docstring''' if openai_config_file == "": lowercase = OpenAIGPTConfig() else: lowercase = OpenAIGPTConfig.from_json_file(lowerCAmelCase__ ) lowercase = OpenAIGPTModel(lowerCAmelCase__ ) # Load weights from numpy load_tf_weights_in_openai_gpt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model lowercase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME lowercase = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCAmelCase : List[str] =argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) __lowerCAmelCase : str =parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
32
"""simple docstring""" def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""only integers accepted as input""" ) else: lowercase = str(abs(lowerCAmelCase__ ) ) lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )] for index in range(len(lowerCAmelCase__ ) ): num_transpositions[index].pop(lowerCAmelCase__ ) return max( int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("""doctest""").testmod()
32
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset a_ : List[Any] = 'bert-base-cased' a_ : List[str] = 'google/pegasus-xsum' a_ : List[str] = [' Sam ate lunch today.', 'Sams lunch ingredients.'] a_ : List[str] = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] a_ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' a_ : Optional[Any] = 'sshleifer/bart-tiny-random' a_ : int = 'sshleifer/tiny-mbart' a_ : Union[str, Any] = 'sshleifer/tiny-marian-en-de' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = '\n'.join(_UpperCAmelCase) Path(_UpperCAmelCase).open('w').writelines(_UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase): for split in ["train", "val", "test"]: _dump_articles(os.path.join(_UpperCAmelCase , F'''{split}.source''') , _UpperCAmelCase) _dump_articles(os.path.join(_UpperCAmelCase , F'''{split}.target''') , _UpperCAmelCase) return tmp_dir class _snake_case ( A__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) SCREAMING_SNAKE_CASE = max(len(tokenizer.encode(a)) for a in ARTICLES) SCREAMING_SNAKE_CASE = max(len(tokenizer.encode(a)) for a in SUMMARIES) SCREAMING_SNAKE_CASE = 4 SCREAMING_SNAKE_CASE = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. SCREAMING_SNAKE_CASE = SeqaSeqDataset( a , data_dir=a , type_path='train' , max_source_length=a , max_target_length=a , src_lang=a , tgt_lang=a , ) SCREAMING_SNAKE_CASE = DataLoader(a , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(a , a) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place SCREAMING_SNAKE_CASE = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def SCREAMING_SNAKE_CASE__ ( self , a) -> int: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) SCREAMING_SNAKE_CASE = max(len(tokenizer.encode(a)) for a in ARTICLES) SCREAMING_SNAKE_CASE = max(len(tokenizer.encode(a)) for a in SUMMARIES) SCREAMING_SNAKE_CASE = 4 SCREAMING_SNAKE_CASE = LegacySeqaSeqDataset( a , data_dir=a , type_path='train' , max_source_length=20 , max_target_length=a , ) SCREAMING_SNAKE_CASE = DataLoader(a , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25') SCREAMING_SNAKE_CASE = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) SCREAMING_SNAKE_CASE = tmp_dir.joinpath('train.source').open().readlines() SCREAMING_SNAKE_CASE = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(a , a , 128 , a) SCREAMING_SNAKE_CASE = {x.name for x in tmp_dir.iterdir()} SCREAMING_SNAKE_CASE = {x.name for x in save_dir.iterdir()} SCREAMING_SNAKE_CASE = save_dir.joinpath('train.source').open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(a) < len(a) assert len(a) == 1 assert len(packed_examples[0]) == sum(len(a) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq') def SCREAMING_SNAKE_CASE__ ( self) -> str: if not FAIRSEQ_AVAILABLE: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_dataset(max_len=64) SCREAMING_SNAKE_CASE = 64 SCREAMING_SNAKE_CASE = ds.make_dynamic_sampler(a , required_batch_size_multiple=a) SCREAMING_SNAKE_CASE = [len(a) for x in batch_sampler] assert len(set(a)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(a) == len(a) # no dropped or added examples SCREAMING_SNAKE_CASE = DataLoader(a , batch_sampler=a , collate_fn=ds.collate_fn , num_workers=2) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for batch in data_loader: SCREAMING_SNAKE_CASE = batch['input_ids'].shape SCREAMING_SNAKE_CASE = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple SCREAMING_SNAKE_CASE = np.product(batch['input_ids'].shape) 
num_src_per_batch.append(a) if num_src_tokens > (max_tokens * 1.1): failures.append(a) assert num_src_per_batch[0] == max(a) if failures: raise AssertionError(f'''too many tokens in {len(a)} batches''') def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_dataset(max_len=512) SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = ds.make_sortish_sampler(a , shuffle=a) SCREAMING_SNAKE_CASE = DataLoader(a , batch_size=a , collate_fn=ds.collate_fn , num_workers=2) SCREAMING_SNAKE_CASE = DataLoader(a , batch_size=a , collate_fn=ds.collate_fn , num_workers=2 , sampler=a) SCREAMING_SNAKE_CASE = tokenizer.pad_token_id def count_pad_tokens(a , a="input_ids"): return [batch[k].eq(a).sum().item() for batch in data_loader] assert sum(count_pad_tokens(a , k='labels')) < sum(count_pad_tokens(a , k='labels')) assert sum(count_pad_tokens(a)) < sum(count_pad_tokens(a)) assert len(a) == len(a) def SCREAMING_SNAKE_CASE__ ( self , a=1000 , a=128) -> int: if os.getenv('USE_REAL_DATA' , a): SCREAMING_SNAKE_CASE = 'examples/seq2seq/wmt_en_ro' SCREAMING_SNAKE_CASE = max_len * 2 * 64 if not Path(a).joinpath('train.len').exists(): save_len_file(a , a) else: SCREAMING_SNAKE_CASE = 'examples/seq2seq/test_data/wmt_en_ro' SCREAMING_SNAKE_CASE = max_len * 4 save_len_file(a , a) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = SeqaSeqDataset( a , data_dir=a , type_path='train' , max_source_length=a , max_target_length=a , n_obs=a , ) return ds, max_tokens, tokenizer def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_dataset() SCREAMING_SNAKE_CASE = set(DistributedSortishSampler(a , 256 , num_replicas=2 , rank=0 , add_extra_examples=a)) SCREAMING_SNAKE_CASE = set(DistributedSortishSampler(a , 256 , num_replicas=2 , rank=1 , add_extra_examples=a)) assert idsa.intersection(a) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a , use_fast=a) if tok_name == MBART_TINY: SCREAMING_SNAKE_CASE = SeqaSeqDataset( a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) SCREAMING_SNAKE_CASE = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: SCREAMING_SNAKE_CASE = SeqaSeqDataset( a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , ) SCREAMING_SNAKE_CASE = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(a) == 1 if tok_name == BART_TINY else len(a) == 0
137
def merge_sort(collection: list) -> list:
    """Sort `collection` with a generator-based merge; names restored from the
    recursive call sites in the original dump."""

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
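A few quick checks for the merge sort above, covering the empty, singleton, and
negative-number cases:

assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []
assert merge_sort([1]) == [1]
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]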
137
1
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = { "bart": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "layoutlm": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 
"ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "wav2vec2": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True ) -> Optional[int]: if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) snake_case , snake_case , snake_case , snake_case = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: snake_case = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models ) snake_case = config_class.from_json_file(__lowerCAmelCase ) snake_case = True snake_case = True print(F'''Building TensorFlow model from configuration: {config}''' ) snake_case = model_class(__lowerCAmelCase ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): snake_case = cached_file( __lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: snake_case = load_pytorch_checkpoint_in_tfa_model(__lowerCAmelCase , __lowerCAmelCase ) if compare_with_pt_model: snake_case = tf_model(tf_model.dummy_inputs , training=__lowerCAmelCase ) # build the network snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" ) snake_case = pt_model_class.from_pretrained( pretrained_model_name_or_path=__lowerCAmelCase , config=__lowerCAmelCase , state_dict=__lowerCAmelCase ) with torch.no_grad(): snake_case = pt_model(**pt_model.dummy_inputs ) snake_case = pto[0].numpy() snake_case = tfo[0].numpy() snake_case = np.amax(np.abs(np_pt - np_tf ) ) print(F'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(F'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(__lowerCAmelCase , save_format="""h5""" ) def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : Union[str, Any]=False , ) -> Union[str, Any]: if args_model_type is None: snake_case = list(MODEL_CLASSES.keys() ) else: snake_case = [args_model_type] for j, model_type in enumerate(__lowerCAmelCase , start=1 ): print("""=""" * 1_00 ) print(F''' Converting model type {j}/{len(__lowerCAmelCase )}: {model_type}''' ) print("""=""" * 1_00 ) if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) snake_case , snake_case , snake_case , snake_case , snake_case = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: snake_case = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: snake_case = model_shortcut_names_or_path for i, (model_shortcut_name, 
config_shortcut_name) in enumerate( zip(__lowerCAmelCase , __lowerCAmelCase ) , start=1 ): print("""-""" * 1_00 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue snake_case = model_shortcut_name elif only_convert_finetuned_models: print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( F''' Converting checkpoint {i}/{len(__lowerCAmelCase )}: {model_shortcut_name} - model_type {model_type}''' ) print("""-""" * 1_00 ) if config_shortcut_name in aws_config_map: snake_case = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models ) else: snake_case = config_shortcut_name if model_shortcut_name in aws_model_maps: snake_case = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models ) else: snake_case = model_shortcut_name if os.path.isfile(__lowerCAmelCase ): snake_case = """converted_model""" convert_pt_checkpoint_to_tf( model_type=__lowerCAmelCase , pytorch_checkpoint_path=__lowerCAmelCase , config_file=__lowerCAmelCase , tf_dump_path=os.path.join(__lowerCAmelCase , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__lowerCAmelCase , ) if remove_cached_files: os.remove(__lowerCAmelCase ) os.remove(__lowerCAmelCase ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." ) parser.add_argument( "--model_type", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ "convert all the models from AWS." ), ) parser.add_argument( "--pytorch_checkpoint_path", default=None, type=str, help=( "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " "If not given, will download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--config_file", default=None, type=str, help=( "The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture. If not given and " "--pytorch_checkpoint_path is not given or is a shortcut name " "use the configuration associated to the shortcut name on the AWS" ), ) parser.add_argument( "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." 
) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") _SCREAMING_SNAKE_CASE = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
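A hedged sketch of driving the converter above programmatically rather than via argparse;
the function name and keyword arguments come from the `__main__` call in the script itself,
but the "bert" model type and shortcut name are illustrative only.

convert_all_pt_checkpoints_to_tf(
    "bert",                                            # model_type (must be a key of MODEL_CLASSES)
    "./tf_models",                                     # tf_dump_path (placeholder)
    model_shortcut_names_or_path=["bert-base-cased"],  # illustrative shortcut name
    config_shortcut_names_or_path=["bert-base-cased"],
    compare_with_pt_model=True,                        # also run the PT model and compare outputs
)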
3
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate the interpolating polynomial through (x_points, y_points) at x0
    using Neville's method; returns [value, table]. Parameter names restored from
    the references inside the original (placeholder-obfuscated) body."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
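A worked example for the routine above (using the cleaned-up name `neville_interpolate`):
the five points below all lie on the line y = x + 5, and an interpolating polynomial through
collinear points reproduces that line exactly, so evaluating at x0 = 5 must give 10.0.

estimate, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
assert estimate == 10.0  # exact on linear data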
3
1
"""simple docstring""" from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers UpperCAmelCase : List[str] = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py") def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> int: '''simple docstring''' require_version(deps[pkg] , __lowerCAmelCase )
136
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __a: List[str] = logging.get_logger(__name__) class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = ["pixel_values"] def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , **__lowerCAmelCase , ) -> None: super().__init__(**__lowerCAmelCase ) lowercase__ : Optional[int] = size if size is not None else {'''height''': 384, '''width''': 384} lowercase__ : Optional[Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) lowercase__ : Dict = do_resize lowercase__ : int = size lowercase__ : int = resample lowercase__ : Tuple = do_rescale lowercase__ : int = rescale_factor lowercase__ : int = do_normalize lowercase__ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase__ : Tuple = do_convert_rgb def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> np.ndarray: lowercase__ : Union[str, Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) lowercase__ : Any = (size['''height'''], size['''width''']) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> str: return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> np.ndarray: return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ) -> PIL.Image.Image: lowercase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowercase__ : Any = resample if resample is not None else self.resample lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : Dict = image_mean if image_mean is not None else self.image_mean lowercase__ : Dict = image_std if image_std is not None else self.image_std lowercase__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ : Optional[int] = size if size is not None else self.size lowercase__ : int = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) lowercase__ : str = make_list_of_images(__lowerCAmelCase ) if not valid_images(__lowerCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ : Optional[Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images] # All transformations expect numpy arrays. lowercase__ : Any = [to_numpy_array(__lowerCAmelCase ) for image in images] if do_resize: lowercase__ : Tuple = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images] if do_rescale: lowercase__ : List[str] = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images] if do_normalize: lowercase__ : Tuple = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images] lowercase__ : List[str] = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images] lowercase__ : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowerCAmelCase ) return encoded_outputs
198
0
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            # GenerationConfig is not JSON-serializable as-is; flatten it to a dict
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
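A minimal sketch of constructing these arguments for generation-based evaluation; the
output_dir value and the concrete beam/length numbers are placeholders, the field names
come from the dataclass above.

args = Seq2SeqTrainingArguments(
    output_dir="./seq2seq_out",   # placeholder path
    predict_with_generate=True,   # decode with generate() during evaluation
    generation_max_length=128,
    generation_num_beams=4,
    sortish_sampler=True,
)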
358
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _lowerCamelCase : Dict = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Optional[Any], *__A : Tuple, **__A : Tuple ): super().__init__(*__A, **__A ) self.check_model_type(__A ) def __magic_name__ ( self : Union[str, Any], __A : int=None, __A : Tuple=None, __A : Any=None, **__A : Optional[int] ): UpperCAmelCase , UpperCAmelCase : List[Any] = {}, {} if padding is not None: UpperCAmelCase : Optional[int] = padding if truncation is not None: UpperCAmelCase : Optional[int] = truncation if top_k is not None: UpperCAmelCase : Tuple = top_k return preprocess_params, {}, postprocess_params def __call__( self : Union[str, Any], __A : Union["Image.Image", str], __A : str = None, **__A : Optional[int] ): if isinstance(__A, (Image.Image, str) ) and isinstance(__A, __A ): UpperCAmelCase : int = {'''image''': image, '''question''': question} else: UpperCAmelCase : str = image UpperCAmelCase : Union[str, Any] = super().__call__(__A, **__A ) return results def __magic_name__ ( self : List[str], __A : Union[str, Any], __A : Tuple=False, __A : List[Any]=False ): UpperCAmelCase : int = load_image(inputs['''image'''] ) UpperCAmelCase : List[str] = self.tokenizer( inputs['''question'''], return_tensors=self.framework, padding=__A, truncation=__A ) UpperCAmelCase : Union[str, Any] = self.image_processor(images=__A, return_tensors=self.framework ) model_inputs.update(__A ) return model_inputs def __magic_name__ ( self : Optional[Any], __A : List[Any] ): UpperCAmelCase : Optional[int] = self.model(**__A ) return model_outputs def __magic_name__ ( self : Any, __A : List[str], __A : Union[str, Any]=5 ): if top_k > self.model.config.num_labels: UpperCAmelCase : Any = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase : Any = model_outputs.logits.sigmoid()[0] UpperCAmelCase , UpperCAmelCase : Union[str, Any] = probs.topk(__A ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : str = scores.tolist() UpperCAmelCase : Tuple = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__A, __A )]
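A hedged usage sketch for the pipeline above, assuming it is registered under the usual
"visual-question-answering" task name; the image path is a placeholder, and the checkpoint
shown is simply a commonly used VQA model, not one named in this file.

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# Accepts a PIL image, local path, or URL plus a question string.
preds = vqa(image="photo.jpg", question="What is on the table?", top_k=2)
# -> [{"score": ..., "answer": ...}, {"score": ..., "answer": ...}]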
99
0
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if `phone` is a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
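A few checks against the regex above: the number must reduce to ten digits starting with
7, 8 or 9, optionally prefixed by +91 (with an optional space or hyphen), 0, or 91.

assert indian_phone_validator("+918827897895") is True
assert indian_phone_validator("9876543210") is True      # bare 10-digit mobile
assert indian_phone_validator("+91 9876543210") is True  # +91 followed by a space
assert indian_phone_validator("1234567890") is False     # must start with 7/8/9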
116
'''simple docstring''' import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def lowerCamelCase ( __lowerCamelCase : Tuple ) ->Tuple: _SCREAMING_SNAKE_CASE = fname.split(os.path.sep )[-1] return re.search(R"""^(.*)_\d+\.jpg$""" , __lowerCamelCase ).groups()[0] class a_ ( snake_case_ ): '''simple docstring''' def __init__( self , A , A=None , A=None ) -> int: _SCREAMING_SNAKE_CASE = file_names _SCREAMING_SNAKE_CASE = image_transform _SCREAMING_SNAKE_CASE = label_to_id def __len__( self ) -> Optional[Any]: return len(self.file_names ) def __getitem__( self , A ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.file_names[idx] _SCREAMING_SNAKE_CASE = PIL.Image.open(A ) _SCREAMING_SNAKE_CASE = raw_image.convert("""RGB""" ) if self.image_transform is not None: _SCREAMING_SNAKE_CASE = self.image_transform(A ) _SCREAMING_SNAKE_CASE = extract_label(A ) if self.label_to_id is not None: _SCREAMING_SNAKE_CASE = self.label_to_id[label] return {"image": image, "label": label} def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) ->str: # Initialize accelerator if args.with_tracking: _SCREAMING_SNAKE_CASE = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: _SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE = config["""lr"""] _SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE = config["""image_size"""] if not isinstance(__lowerCamelCase , (list, tuple) ): _SCREAMING_SNAKE_CASE = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": _SCREAMING_SNAKE_CASE = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _SCREAMING_SNAKE_CASE = int(args.checkpointing_steps ) else: raise ValueError( F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' ) else: _SCREAMING_SNAKE_CASE = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _SCREAMING_SNAKE_CASE = os.path.split(__lowerCamelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__lowerCamelCase , __lowerCamelCase ) # Grab all the image filenames _SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , __lowerCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences _SCREAMING_SNAKE_CASE = [extract_label(__lowerCamelCase ) for fname in file_names] _SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) ) id_to_label.sort() _SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(__lowerCamelCase )} # Set the seed before splitting the data. 
np.random.seed(__lowerCamelCase ) torch.manual_seed(__lowerCamelCase ) torch.cuda.manual_seed_all(__lowerCamelCase ) # Split our filenames between train and validation _SCREAMING_SNAKE_CASE = np.random.permutation(len(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE = int(0.8 * len(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE = random_perm[:cut] _SCREAMING_SNAKE_CASE = random_perm[cut:] # For training we use a simple RandomResizedCrop _SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(__lowerCamelCase , scale=(0.5, 1.0) ), ToTensor()] ) _SCREAMING_SNAKE_CASE = PetsDataset( [file_names[i] for i in train_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase ) # For evaluation, we use a deterministic Resize _SCREAMING_SNAKE_CASE = Compose([Resize(__lowerCamelCase ), ToTensor()] ) _SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 ) _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE = create_model("""resnet50d""" , pretrained=__lowerCamelCase , num_classes=len(__lowerCamelCase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _SCREAMING_SNAKE_CASE = False for param in model.get_classifier().parameters(): _SCREAMING_SNAKE_CASE = True # We normalize the batches of images to be a bit faster. _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=__lowerCamelCase , max_lr=__lowerCamelCase , epochs=__lowerCamelCase , steps_per_epoch=len(__lowerCamelCase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # We need to keep track of how many total steps we have iterated over _SCREAMING_SNAKE_CASE = 0 # We also need to keep track of the starting epoch so files are named properly _SCREAMING_SNAKE_CASE = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' ) accelerator.load_state(args.resume_from_checkpoint ) _SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _SCREAMING_SNAKE_CASE = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _SCREAMING_SNAKE_CASE = os.path.splitext(__lowerCamelCase )[0] if "epoch" in training_difference: _SCREAMING_SNAKE_CASE = int(training_difference.replace("""epoch_""" , """""" ) ) + 1 _SCREAMING_SNAKE_CASE = None else: _SCREAMING_SNAKE_CASE = int(training_difference.replace("""step_""" , """""" ) ) _SCREAMING_SNAKE_CASE = resume_step // len(__lowerCamelCase ) resume_step -= starting_epoch * len(__lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase , __lowerCamelCase ): model.train() if args.with_tracking: _SCREAMING_SNAKE_CASE = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(__lowerCamelCase , __lowerCamelCase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _SCREAMING_SNAKE_CASE = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(__lowerCamelCase , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE = F'step_{overall_step}' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase ) accelerator.save_state(__lowerCamelCase ) model.eval() _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
_SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""label"""]) ) _SCREAMING_SNAKE_CASE = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _SCREAMING_SNAKE_CASE = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(__lowerCamelCase ), """epoch""": epoch, } , step=__lowerCamelCase , ) if checkpointing_steps == "epoch": _SCREAMING_SNAKE_CASE = F'epoch_{epoch}' if args.output_dir is not None: _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase ) accelerator.save_state(__lowerCamelCase ) if args.with_tracking: accelerator.end_training() def lowerCamelCase ( ) ->int: _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=__lowerCamelCase , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=__lowerCamelCase , default=__lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=__lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__lowerCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) _SCREAMING_SNAKE_CASE = parser.parse_args() _SCREAMING_SNAKE_CASE = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
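A plausible launch command for the training script above, grounded in its argparse flags;
the script filename and data path are assumptions, shown here as comments to keep the
record in Python.

# Typically launched through the Accelerate CLI, e.g.:
#   accelerate launch cv_example.py \
#       --data_dir ./images \
#       --mixed_precision fp16 \
#       --checkpointing_steps epoch \
#       --with_tracking \
#       --output_dir ./checkpoints
# ("cv_example.py" is a hypothetical filename; every flag exists in the parser above.)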
58
0
"""simple docstring""" UpperCAmelCase : int = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] UpperCAmelCase : Optional[int] = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] UpperCAmelCase : List[Any] = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] UpperCAmelCase : Optional[int] = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] UpperCAmelCase : int = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] UpperCAmelCase : Optional[Any] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] UpperCAmelCase : Optional[Any] = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] UpperCAmelCase : Optional[int] = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
363
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
0
"""simple docstring""" import baseaa def __A ( a_ :str) -> bytes: return baseaa.aaaencode(string.encode('''utf-8''')) def __A ( a_ :bytes) -> str: return baseaa.aaadecode(a_).decode('''utf-8''') if __name__ == "__main__": import doctest doctest.testmod()
160
"""simple docstring""" import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __lowercase ( unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING __lowerCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Any = AudioClassificationPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) # test with a raw waveform __a : Optional[Any] = np.zeros((34000,) ) __a : Union[str, Any] = np.zeros((14000,) ) return audio_classifier, [audioa, audio] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a , __a : Dict = examples __a : Tuple = audio_classifier(_UpperCAmelCase ) # by default a model is initialized with num_labels=2 self.assertEqual( _UpperCAmelCase , [ {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, ] , ) __a : List[Any] = audio_classifier(_UpperCAmelCase , top_k=1 ) self.assertEqual( _UpperCAmelCase , [ {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, ] , ) self.run_torchaudio(_UpperCAmelCase ) @require_torchaudio def _lowerCamelCase ( self , _UpperCAmelCase ): import datasets # test with a local file __a : Tuple = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) __a : Union[str, Any] = dataset[0]['''audio''']['''array'''] __a : Tuple = audio_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, ] , ) @require_torch def _lowerCamelCase ( self ): __a : Optional[Any] = '''anton-l/wav2vec2-random-tiny-classifier''' __a : Union[str, Any] = pipeline('''audio-classification''' , model=_UpperCAmelCase ) __a : Optional[int] = np.ones((8000,) ) __a : Optional[int] = audio_classifier(_UpperCAmelCase , top_k=4 ) __a : Tuple = [ {'''score''': 0.0_8_4_2, '''label''': '''no'''}, {'''score''': 0.0_8_3_8, '''label''': '''up'''}, {'''score''': 0.0_8_3_7, '''label''': '''go'''}, {'''score''': 0.0_8_3_4, '''label''': '''right'''}, ] __a : Dict = [ {'''score''': 0.0_8_4_5, '''label''': '''stop'''}, {'''score''': 0.0_8_4_4, '''label''': '''on'''}, {'''score''': 0.0_8_4_1, '''label''': '''right'''}, {'''score''': 0.0_8_3_4, '''label''': '''left'''}, ] self.assertIn(nested_simplify(_UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) __a : List[Any] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} __a : Optional[Any] = audio_classifier(_UpperCAmelCase , top_k=4 ) self.assertIn(nested_simplify(_UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def _lowerCamelCase ( self ): import datasets __a : Tuple = '''superb/wav2vec2-base-superb-ks''' __a : Optional[int] = pipeline('''audio-classification''' , model=_UpperCAmelCase ) __a : int = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' ) __a : Any = 
np.array(dataset[3]['''speech'''] , dtype=np.floataa ) __a : Tuple = audio_classifier(_UpperCAmelCase , top_k=4 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=3 ) , [ {'''score''': 0.9_8_1, '''label''': '''go'''}, {'''score''': 0.0_0_7, '''label''': '''up'''}, {'''score''': 0.0_0_6, '''label''': '''_unknown_'''}, {'''score''': 0.0_0_1, '''label''': '''down'''}, ] , ) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def _lowerCamelCase ( self ): pass
160
1
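For reference, the `baseaa.aaaencode`/`aaadecode` pair in the record above appears, under this corpus's digit-to-letter renaming, to stand for Python's standard-library Ascii85 helpers. A minimal un-obfuscated round trip, assuming that mapping (the function names here are illustrative):

import base64


def ascii85_encode(text: str) -> bytes:
    # UTF-8 text -> Ascii85 bytes (the presumed original of aaaencode).
    return base64.a85encode(text.encode("utf-8"))


def ascii85_decode(data: bytes) -> str:
    # Ascii85 bytes -> UTF-8 text (the presumed original of aaadecode).
    return base64.a85decode(data).decode("utf-8")


assert ascii85_decode(ascii85_encode("some text")) == "some text"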
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger UpperCAmelCase__ = get_logger(__name__) UpperCAmelCase__ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n" class __lowerCAmelCase : @add_start_docstrings(A) def __call__( self : Optional[int] , A : Optional[Any] , A : List[str]) -> jnp.ndarray: """simple docstring""" raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") class __lowerCAmelCase : @add_start_docstrings(A) def __call__( self : int , A : Optional[Any] , A : Dict) -> jnp.ndarray: """simple docstring""" raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") class __lowerCAmelCase ( _a ): @add_start_docstrings(A) def __call__( self : Tuple , A : List[Any] , A : int , A : Tuple , **A : Union[str, Any]) -> jnp.ndarray: """simple docstring""" for processor in self: _UpperCAmelCase = inspect.signature(processor.__call__).parameters if len(A) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( F"Make sure that all the required parameters: {list(function_args.keys())} for " F"{processor.__class__} are passed to the logits processor.") _UpperCAmelCase = processor(A , A , A , **A) else: _UpperCAmelCase = processor(A , A , A) return scores class __lowerCAmelCase ( _a ): def __init__( self : Tuple , A : int) -> int: """simple docstring""" if not isinstance(A , A) or not (temperature > 0): raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}") _UpperCAmelCase = temperature def __call__( self : Any , A : List[str] , A : int , A : List[str]) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = scores / self.temperature return scores class __lowerCAmelCase ( _a ): def __init__( self : Dict , A : Any , A : Tuple = -float('Inf') , A : Dict = 1) -> str: """simple docstring""" if not isinstance(A , A) or (top_p < 0 or top_p > 1.0): raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(A , A) or (min_tokens_to_keep < 1): raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") _UpperCAmelCase = top_p _UpperCAmelCase = filter_value _UpperCAmelCase = min_tokens_to_keep def __call__( self : List[str] , A : Optional[int] , A : List[str] , A : List[str]) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = lax.top_k(A , scores.shape[-1]) _UpperCAmelCase = jnp.full_like(A , self.filter_value) _UpperCAmelCase = jax.nn.softmax(A , axis=-1).cumsum(axis=-1) _UpperCAmelCase = cumulative_probs < self.top_p # include the token that is higher than top_p as well _UpperCAmelCase = jnp.roll(A , 1) 
score_mask |= score_mask.at[:, 0].set(A) # min tokens to keep _UpperCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A) _UpperCAmelCase = jnp.where(A , A , A) _UpperCAmelCase = jax.lax.sort_key_val(A , A)[-1] return next_scores class __lowerCAmelCase ( _a ): def __init__( self : List[str] , A : List[str] , A : List[str] = -float('Inf') , A : Dict = 1) -> Any: """simple docstring""" if not isinstance(A , A) or top_k <= 0: raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}") _UpperCAmelCase = max(A , A) _UpperCAmelCase = filter_value def __call__( self : Tuple , A : Any , A : Dict , A : Tuple) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = scores.shape _UpperCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value) _UpperCAmelCase = min(self.top_k , scores.shape[-1]) # Safety check _UpperCAmelCase = lax.top_k(A , A) _UpperCAmelCase = jnp.broadcast_to((jnp.arange(A) * vocab_size)[:, None] , (batch_size, topk)).flatten() _UpperCAmelCase = topk_scores.flatten() _UpperCAmelCase = topk_indices.flatten() + shift _UpperCAmelCase = next_scores_flat.at[topk_indices_flat].set(A) _UpperCAmelCase = next_scores_flat.reshape(A , A) return next_scores class __lowerCAmelCase ( _a ): def __init__( self : Any , A : Union[str, Any]) -> str: """simple docstring""" _UpperCAmelCase = bos_token_id def __call__( self : Any , A : Union[str, Any] , A : List[str] , A : Optional[int]) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = jnp.full(scores.shape , -float('inf')) _UpperCAmelCase = 1 - jnp.bool_(cur_len - 1) _UpperCAmelCase = jnp.where(A , new_scores.at[:, self.bos_token_id].set(0) , A) return scores class __lowerCAmelCase ( _a ): def __init__( self : List[str] , A : List[str] , A : int) -> int: """simple docstring""" _UpperCAmelCase = max_length _UpperCAmelCase = eos_token_id def __call__( self : int , A : Optional[int] , A : Dict , A : Any) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = jnp.full(scores.shape , -float('inf')) _UpperCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1) _UpperCAmelCase = jnp.where(A , new_scores.at[:, self.eos_token_id].set(0) , A) return scores class __lowerCAmelCase ( _a ): def __init__( self : List[Any] , A : Optional[Any] , A : Tuple) -> Dict: """simple docstring""" if not isinstance(A , A) or min_length < 0: raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(A , A) or eos_token_id < 0: raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}") _UpperCAmelCase = min_length _UpperCAmelCase = eos_token_id def __call__( self : Dict , A : Optional[int] , A : str , A : Optional[Any]) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1) _UpperCAmelCase = jnp.where(A , scores.at[:, self.eos_token_id].set(-float('inf')) , A) return scores class __lowerCAmelCase ( _a ): def __init__( self : List[Any] , A : Any , A : Tuple) -> int: """simple docstring""" _UpperCAmelCase = list(A) _UpperCAmelCase = begin_index def __call__( self : List[str] , A : Tuple , A : Optional[int] , A : Optional[int]) -> str: """simple docstring""" _UpperCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index) _UpperCAmelCase = jnp.where(A , scores.at[:, self.begin_suppress_tokens].set(-float('inf')) , A) return scores class __lowerCAmelCase ( _a ): def __init__( self : str , A : int) -> int: """simple docstring""" _UpperCAmelCase = list(A) def __call__( self : Optional[int] , A : Any , A : Optional[int] , 
A : List[str]) -> jnp.ndarray: """simple docstring""" _UpperCAmelCase = scores.at[..., self.suppress_tokens].set(-float('inf')) return scores class __lowerCAmelCase ( _a ): def __init__( self : List[Any] , A : Optional[int]) -> str: """simple docstring""" _UpperCAmelCase = dict(A) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. _UpperCAmelCase = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1 for index, token in force_token_map.items(): if token is not None: _UpperCAmelCase = force_token_array.at[index].set(A) _UpperCAmelCase = jnp.intaa(A) def __call__( self : Union[str, Any] , A : List[str] , A : Optional[Any] , A : List[str]) -> jnp.ndarray: """simple docstring""" def _force_token(A : int): _UpperCAmelCase = scores.shape[0] _UpperCAmelCase = self.force_token_array[generation_idx] _UpperCAmelCase = jnp.ones_like(A , dtype=scores.dtype) * -float('inf') _UpperCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype) _UpperCAmelCase = lax.dynamic_update_slice(A , A , (0, current_token)) return new_scores _UpperCAmelCase = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(A) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( _a ): def __init__( self : int , A : Any , A : Optional[Any] , A : List[str]) -> str: """simple docstring""" _UpperCAmelCase = generate_config.eos_token_id _UpperCAmelCase = generate_config.no_timestamps_token_id _UpperCAmelCase = generate_config.no_timestamps_token_id + 1 _UpperCAmelCase = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(A , 'max_initial_timestamp_index'): _UpperCAmelCase = generate_config.max_initial_timestamp_index else: _UpperCAmelCase = model_config.vocab_size if self.max_initial_timestamp_index is None: _UpperCAmelCase = model_config.vocab_size def __call__( self : List[str] , A : Optional[Any] , A : Tuple , A : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf')) def handle_pairs(A : List[str] , A : str): _UpperCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , A , A) _UpperCAmelCase = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A , ) _UpperCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , A , A) _UpperCAmelCase = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , A , A , ) return jnp.where( A , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf')) , scores_k.at[: self.eos_token_id].set(-float('inf')) , ) , A , ) _UpperCAmelCase = jax.vmap(A)(A , A) _UpperCAmelCase = jnp.where(cur_len == self.begin_index , A , A) _UpperCAmelCase = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A , ) _UpperCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index _UpperCAmelCase = jnp.where( A , scores.at[:, last_allowed + 1 :].set(-float('inf')) , A , ) # if sum of probability over timestamps is above any other token, sample timestamp _UpperCAmelCase = jax.nn.log_softmax(A , axis=-1) def handle_cumulative_probs(A : List[str] , A : Optional[Any]): _UpperCAmelCase = 
jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1) _UpperCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf')) , A , ) _UpperCAmelCase = jax.vmap(A)(A , A) return scores
368
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class __lowerCAmelCase : def __init__( self : Any , A : str = "cpu" , A : str = "openai/clip-vit-large-patch14") -> None: """simple docstring""" _UpperCAmelCase = device _UpperCAmelCase = CLIPTokenizerFast.from_pretrained(A) _UpperCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] _UpperCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] _UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std) _UpperCAmelCase = torchvision.transforms.Resize(2_24) _UpperCAmelCase = torchvision.transforms.CenterCrop(2_24) def _lowerCamelCase ( self : str , A : Any) -> str: """simple docstring""" _UpperCAmelCase = self.resize(A) _UpperCAmelCase = self.center_crop(A) _UpperCAmelCase = self.normalize(A) return images def __call__( self : Any , A : Dict=None , A : Dict=None , **A : List[Any]) -> Dict: """simple docstring""" _UpperCAmelCase = self.tokenizer(text=A , **A) _UpperCAmelCase = self.preprocess_img(A) _UpperCAmelCase = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class __lowerCAmelCase ( nn.Module ): def __init__( self : List[Any] , A : Any=10 , A : List[Any]=0.0_1 , A : Optional[int]=None , A : int=None , A : Dict=None , A : Tuple=None , A : str=None , A : Dict=None , A : Union[str, Any]=False , A : Any=True , A : Any="image" , A : Tuple=True , A : List[Any]=False , A : int=False , A : int=False , ) -> None: """simple docstring""" super().__init__() _UpperCAmelCase = None _UpperCAmelCase = device if device else get_device() if vqgan: _UpperCAmelCase = vqgan else: _UpperCAmelCase = load_vqgan(self.device , conf_path=A , ckpt_path=A) self.vqgan.eval() if clip: _UpperCAmelCase = clip else: _UpperCAmelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32') self.clip.to(self.device) _UpperCAmelCase = ProcessorGradientFlow(device=self.device) _UpperCAmelCase = iterations _UpperCAmelCase = lr _UpperCAmelCase = log _UpperCAmelCase = make_grid _UpperCAmelCase = return_val _UpperCAmelCase = quantize _UpperCAmelCase = self.vqgan.decoder.z_shape def _lowerCamelCase ( self : Optional[int] , A : int=None , A : Union[str, Any]=None , A : Dict=5 , A : Optional[Any]=True) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = [] if output_path is None: _UpperCAmelCase = './animation.gif' if input_path is None: _UpperCAmelCase = self.save_path _UpperCAmelCase = sorted(glob(input_path + '/*')) if not len(A): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)') if len(A) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)') _UpperCAmelCase = total_duration / len(A) _UpperCAmelCase = [frame_duration] * len(A) if extend_frames: _UpperCAmelCase = 1.5 _UpperCAmelCase = 3 for file_name in paths: if file_name.endswith('.png'): images.append(imageio.imread(A)) imageio.mimsave(A , A , duration=A) print(F"gif saved to {output_path}") def _lowerCamelCase ( self : List[str] , A : Optional[Any]=None , A : Optional[int]=None) -> int: """simple docstring""" if not (path or img): raise ValueError('Input either path or tensor') if img is 
not None: raise NotImplementedError _UpperCAmelCase = preprocess(Image.open(A) , target_image_size=2_56).to(self.device) _UpperCAmelCase = preprocess_vqgan(A) _UpperCAmelCase , *_UpperCAmelCase = self.vqgan.encode(A) return z def _lowerCamelCase ( self : List[str] , A : int) -> Dict: """simple docstring""" _UpperCAmelCase = self.latent.detach().requires_grad_() _UpperCAmelCase = base_latent + transform_vector if self.quantize: _UpperCAmelCase , *_UpperCAmelCase = self.vqgan.quantize(A) else: _UpperCAmelCase = trans_latent return self.vqgan.decode(A) def _lowerCamelCase ( self : Any , A : Dict , A : Dict , A : Optional[Any]=None) -> Any: """simple docstring""" _UpperCAmelCase = self.clip_preprocessor(text=A , images=A , return_tensors='pt' , padding=A) _UpperCAmelCase = self.clip(**A) _UpperCAmelCase = clip_outputs.logits_per_image if weights is not None: _UpperCAmelCase = similarity_logits * weights return similarity_logits.sum() def _lowerCamelCase ( self : Optional[int] , A : Dict , A : int , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = self._get_clip_similarity(pos_prompts['prompts'] , A , weights=(1 / pos_prompts['weights'])) if neg_prompts: _UpperCAmelCase = self._get_clip_similarity(neg_prompts['prompts'] , A , weights=neg_prompts['weights']) else: _UpperCAmelCase = torch.tensor([1] , device=self.device) _UpperCAmelCase = -torch.log(A) + torch.log(A) return loss def _lowerCamelCase ( self : Tuple , A : Optional[int] , A : List[Any] , A : Optional[int]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = torch.randn_like(self.latent , requires_grad=A , device=self.device) _UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr) for i in range(self.iterations): optim.zero_grad() _UpperCAmelCase = self._add_vector(A) _UpperCAmelCase = loop_post_process(A) _UpperCAmelCase = self._get_CLIP_loss(A , A , A) print('CLIP loss' , A) if self.log: wandb.log({'CLIP Loss': clip_loss}) clip_loss.backward(retain_graph=A) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def _lowerCamelCase ( self : Dict , A : Any , A : Optional[int] , A : str) -> Any: """simple docstring""" wandb.init(reinit=A , project='face-editor') wandb.config.update({'Positive Prompts': positive_prompts}) wandb.config.update({'Negative Prompts': negative_prompts}) wandb.config.update({'lr': self.lr, 'iterations': self.iterations}) if image_path: _UpperCAmelCase = Image.open(A) _UpperCAmelCase = image.resize((2_56, 2_56)) wandb.log('Original Image' , wandb.Image(A)) def _lowerCamelCase ( self : Dict , A : int) -> Dict: """simple docstring""" if not prompts: return [] _UpperCAmelCase = [] _UpperCAmelCase = [] if isinstance(A , A): _UpperCAmelCase = [prompt.strip() for prompt in prompts.split('|')] for prompt in prompts: if isinstance(A , (tuple, list)): _UpperCAmelCase = prompt[0] _UpperCAmelCase = float(prompt[1]) elif ":" in prompt: _UpperCAmelCase , _UpperCAmelCase = prompt.split(':') _UpperCAmelCase = float(A) else: _UpperCAmelCase = prompt _UpperCAmelCase = 1.0 processed_prompts.append(A) weights.append(A) return { "prompts": processed_prompts, "weights": torch.tensor(A , device=self.device), } def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any] , A : Union[str, Any]=None , A : int=None , A : Optional[Any]=True , A : Dict=False , A : Union[str, Any]=True , A : Any=True , A : Any=None , ) -> Dict: """simple docstring""" if image_path: _UpperCAmelCase = self._get_latent(A) else: _UpperCAmelCase = torch.randn(self.latent_dim , 
device=self.device) if self.log: self._init_logging(A , A , A) assert pos_prompts, "You must provide at least one positive prompt." _UpperCAmelCase = self.process_prompts(A) _UpperCAmelCase = self.process_prompts(A) if save_final and save_path is None: _UpperCAmelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'])) if not os.path.exists(A): os.makedirs(A) else: _UpperCAmelCase = save_path + '_' + get_timestamp() os.makedirs(A) _UpperCAmelCase = save_path _UpperCAmelCase = self.vqgan.decode(self.latent)[0] if show_intermediate: print('Original Image') show_pil(custom_to_pil(A)) _UpperCAmelCase = loop_post_process(A) for iter, transformed_img in enumerate(self._optimize_CLIP(A , A , A)): if show_intermediate: show_pil(A) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png")) if self.log: wandb.log({'Image': wandb.Image(A)}) if show_final: show_pil(A) if save_final: transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png"))
290
0
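The top-k processor in the record above keeps only the k largest logits per row and replaces the rest with a filter value. A stripped-down sketch of that masking step, not the record's exact scatter-based implementation (and, unlike a true top-k, it keeps all entries tied with the k-th value):

import jax.numpy as jnp
from jax import lax


def top_k_mask(scores: jnp.ndarray, k: int, filter_value: float = -float("inf")) -> jnp.ndarray:
    # Threshold each row at its k-th largest logit and mask everything below it.
    topk_vals, _ = lax.top_k(scores, k)
    threshold = topk_vals[:, -1][:, None]
    return jnp.where(scores < threshold, filter_value, scores)


masked = top_k_mask(jnp.array([[1.0, 3.0, 2.0, 0.5]]), k=2)
# -> only 3.0 and 2.0 survive; the other entries become -inf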
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __lowercase ( _a="" ): snake_case_ : List[str] = tempfile.mkdtemp() return os.path.join(_a , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : str ): snake_case_ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5 snake_case_ : Optional[int] = AgentAudio(lowercase_ ) snake_case_ : List[str] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowercase_ ) ) # Ensure that the file contains the same value as the original tensor snake_case_, snake_case_ : int = sf.read(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1E-4 ) ) def _snake_case ( self : Optional[int] ): snake_case_ : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5 snake_case_ : List[str] = get_new_path(suffix='''.wav''' ) sf.write(lowercase_ , lowercase_ , 16000 ) snake_case_ : Tuple = AgentAudio(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , lowercase_ ) @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Tuple ): snake_case_ : List[Any] = torch.randint(0 , 256 , (64, 64, 3) ) snake_case_ : str = AgentImage(lowercase_ ) snake_case_ : Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' snake_case_ : Optional[int] = Image.open(lowercase_ ) snake_case_ : Tuple = AgentImage(lowercase_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' snake_case_ : Dict = Image.open(lowercase_ ) snake_case_ : List[str] = AgentImage(lowercase_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Any ): snake_case_ : Tuple = '''Hey!''' snake_case_ : Optional[Any] = AgentText(lowercase_ ) self.assertEqual(lowercase_ , agent_type.to_string() ) self.assertEqual(lowercase_ , 
agent_type.to_raw() ) self.assertEqual(lowercase_ , lowercase_ )
264
0
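As a sanity check on the bidirectional search above, a plain one-directional Dijkstra over the same adjacency-list format gives the distance the two-ended variant should agree with:

from queue import PriorityQueue


def dijkstra(graph: dict, source: str, target: str) -> float:
    # Standard forward-only Dijkstra over {node: [[neighbor, weight], ...]}.
    dist = {source: 0}
    pq = PriorityQueue()
    pq.put((0, source))
    while not pq.empty():
        d, v = pq.get()
        if v == target:
            return d
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                pq.put((dist[nxt], nxt))
    return float("inf")


# On the module's forward graph, E -> F costs 3 (E -> G -> F beats E -> B -> C -> D -> F).
graph_fwd = {"B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]]}
assert dijkstra(graph_fwd, "E", "F") == 3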
"""simple docstring""" def __lowerCamelCase ( a_ : str ) -> bool: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError('''check_bouncy() accepts only integer arguments''' ) __SCREAMING_SNAKE_CASE :List[str] = str(__lowerCAmelCase ) __SCREAMING_SNAKE_CASE :Dict = ''''''.join(sorted(__lowerCAmelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def __lowerCamelCase ( a_ : str = 99 ) -> int: if not 0 < percent < 1_00: raise ValueError('''solution() only accepts values from 0 to 100''' ) __SCREAMING_SNAKE_CASE :int = 0 __SCREAMING_SNAKE_CASE :Any = 1 while True: if check_bouncy(__lowerCAmelCase ): bouncy_num += 1 if (bouncy_num / num) * 1_00 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'{solution(9_9)}')
350
"""simple docstring""" from torch import nn class _SCREAMING_SNAKE_CASE( nn.Module ): def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE :Tuple = class_size __SCREAMING_SNAKE_CASE :str = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) __SCREAMING_SNAKE_CASE :Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = self.mlp(SCREAMING_SNAKE_CASE__ ) return logits
239
0
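The bouncy predicate above reduces to two sortedness checks on the digit string; an equivalent formulation over digit lists (names here are illustrative):

def is_bouncy(n: int) -> bool:
    # A number is bouncy iff its digits are neither non-decreasing nor non-increasing.
    digits = list(str(n))
    return sorted(digits) != digits and sorted(digits, reverse=True) != digits


assert not is_bouncy(123)  # increasing
assert not is_bouncy(321)  # decreasing
assert is_bouncy(101)      # neither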
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def a( A : List[str] ) -> Optional[int]: """simple docstring""" a = tmp_path / "file.csv" a = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20\n " ) with open(A , "w" ) as f: f.write(A ) return str(A ) @pytest.fixture def a( A : str ) -> str: """simple docstring""" a = tmp_path / "malformed_file.csv" a = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20,\n " ) with open(A , "w" ) as f: f.write(A ) return str(A ) @pytest.fixture def a( A : Tuple , A : Tuple ) -> int: """simple docstring""" a = tmp_path / "csv_with_image.csv" a = textwrap.dedent( f'''\ image {image_file} ''' ) with open(A , "w" ) as f: f.write(A ) return str(A ) @pytest.fixture def a( A : Optional[Any] ) -> Optional[int]: """simple docstring""" a = tmp_path / "csv_with_label.csv" a = textwrap.dedent( "\\n label\n good\n bad\n good\n " ) with open(A , "w" ) as f: f.write(A ) return str(A ) @pytest.fixture def a( A : Dict ) -> int: """simple docstring""" a = tmp_path / "csv_with_int_list.csv" a = textwrap.dedent( "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " ) with open(A , "w" ) as f: f.write(A ) return str(A ) def a( A : List[str] , A : List[Any] , A : int ) -> List[str]: """simple docstring""" a = Csv() a = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(A , match="Error tokenizing data" ): for _ in generator: pass assert any( record.levelname == "ERROR" and "Failed to read file" in record.message and os.path.basename(A ) in record.message for record in caplog.records ) @require_pil def a( A : Any ) -> Optional[int]: """simple docstring""" with open(A , encoding="utf-8" ) as f: a = f.read().splitlines()[1] a = Csv(encoding="utf-8" , features=Features({"image": Image()} ) ) a = csv._generate_tables([[csv_file_with_image]] ) a = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("image" ).type == Image()() a = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def a( A : Optional[int] ) -> str: """simple docstring""" with open(A , encoding="utf-8" ) as f: a = f.read().splitlines()[1:] a = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) ) a = csv._generate_tables([[csv_file_with_label]] ) a = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )() a = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(A ) for label in labels] def a( A : Dict ) -> Dict: """simple docstring""" a = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda A : [int(A ) for i in x.split()]} ) a = csv._generate_tables([[csv_file_with_int_list]] ) a = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("int_list" ).type ) a = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
227
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def a( A : Tuple ) -> Optional[Any]: """simple docstring""" a = model.config a = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) a = MBartConfig( is_decoder=A , is_encoder_decoder=A , add_cross_attention=A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=A , add_final_layer_norm=A , ) return encoder_config, decoder_config def a( A : List[Any] ) -> Union[str, Any]: """simple docstring""" if "encoder.model" in name: a = name.replace("encoder.model" , "encoder" ) if "decoder.model" in name: a = name.replace("decoder.model" , "decoder" ) if "patch_embed.proj" in name: a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a = name.replace("patch_embed.norm" , "embeddings.norm" ) if name.startswith("encoder" ): if "layers" in name: a = "encoder." + name if "attn.proj" in name: a = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "mask" not in name: a = name.replace("attn" , "attention.self" ) if "norm1" in name: a = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a = name.replace("mlp.fc2" , "output.dense" ) if name == "encoder.norm.weight": a = "encoder.layernorm.weight" if name == "encoder.norm.bias": a = "encoder.layernorm.bias" return name def a( A : Union[str, Any] , A : Tuple ) -> List[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): a = orig_state_dict.pop(A ) if "qkv" in key: a = key.split("." 
) a = int(key_split[3] ) a = int(key_split[5] ) a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: a = val[:dim, :] a = val[dim : dim * 2, :] a = val[-dim:, :] else: a = val[:dim] a = val[dim : dim * 2] a = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: a = val return orig_state_dict def a( A : List[Any] , A : Tuple=None , A : List[Any]=False ) -> Optional[int]: """simple docstring""" a = DonutModel.from_pretrained(A ).eval() # load HuggingFace model a , a = get_configs(A ) a = DonutSwinModel(A ) a = MBartForCausalLM(A ) a = VisionEncoderDecoderModel(encoder=A , decoder=A ) model.eval() a = original_model.state_dict() a = convert_state_dict(A , A ) model.load_state_dict(A ) # verify results on scanned document a = load_dataset("hf-internal-testing/example-documents" ) a = dataset["test"][0]["image"].convert("RGB" ) a = XLMRobertaTokenizerFast.from_pretrained(A , from_slow=A ) a = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) a = DonutProcessor(A , A ) a = processor(A , return_tensors="pt" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a = "When is the coffee break?" a = task_prompt.replace("{user_input}" , A ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": a = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: a = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": a = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": a = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt a = "hello world" else: raise ValueError("Model name not supported" ) a = original_model.decoder.tokenizer(A , add_special_tokens=A , return_tensors="pt" )[ "input_ids" ] a = original_model.encoder.model.patch_embed(A ) a , a = model.encoder.embeddings(A ) assert torch.allclose(A , A , atol=1e-3 ) # verify encoder hidden states a = original_model.encoder(A ) a = model.encoder(A ).last_hidden_state assert torch.allclose(A , A , atol=1e-2 ) # verify decoder hidden states a = original_model(A , A , A ).logits a = model(A , decoder_input_ids=A ).logits assert torch.allclose(A , A , atol=1e-3 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(A ) processor.save_pretrained(A ) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) if __name__ == "__main__": _lowercase: Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) _lowercase: Optional[Any] = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
227
1
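The CSV fixtures in the record above all follow one pattern: build a small file body with `textwrap.dedent`, write it under `tmp_path`, and return the path as a string. A self-contained version of that pattern outside pytest:

import csv
import io
import textwrap

content = textwrap.dedent(
    """\
    header1,header2
    1,2
    10,20
    """
)
rows = list(csv.reader(io.StringIO(content)))
assert rows == [["header1", "header2"], ["1", "2"], ["10", "20"]]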
'''simple docstring'''


def count_inversions_bf(arr) -> int:
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr) -> tuple:
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    p, inversion_p = count_inversions_recursive(p)
    q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(p, q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q) -> tuple:
    """simple docstring"""
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main() -> None:
    """simple docstring"""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("""number of inversions = """, num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """, num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """, num_inversions_bf)


if __name__ == "__main__":
    main()
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
13
1
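A quick cross-check of the counters fixed above on the example the module itself uses, [10, 2, 1, 5, 5, 2, 11], which it states has exactly 8 inversions:

arr = [10, 2, 1, 5, 5, 2, 11]
# Brute-force O(n^2) count of pairs (i, j) with i < j and arr[i] > arr[j].
brute = sum(arr[i] > arr[j] for i in range(len(arr)) for j in range(i + 1, len(arr)))
assert brute == 8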
"""simple docstring""" def _snake_case ( snake_case__ : List[str] ): A = 1 A = 2 while i * i <= n: A = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def _snake_case ( ): A = 1 A = 1 while True: i += 1 t_num += i if count_divisors(snake_case__ ) > 500: break return t_num if __name__ == "__main__": print(solution())
74
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ : Optional[int] ): self.assertTrue(hasattr(A_ ,'sequential' ) ) self.assertTrue(hasattr(A_ ,'cumulative' ) ) self.assertTrue(hasattr(A_ ,'current' ) ) self.assertTrue(hasattr(A_ ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ 
,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
74
1
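The divisor count above comes from the prime factorization: if n = p1^a1 * ... * pk^ak, then n has (a1 + 1) * ... * (ak + 1) divisors. A small check of that identity (76576500 is the well-known answer to this Project Euler problem):

def num_divisors(n: int) -> int:
    # Same factorization-based count as the record's count_divisors.
    count, i = 1, 2
    while i * i <= n:
        m = 0
        while n % i == 0:
            n //= i
            m += 1
        count *= m + 1
        i += 1
    return count * 2 if n > 1 else count


assert num_divisors(28) == 6          # 28 = 2^2 * 7 -> 3 * 2 divisors
assert num_divisors(76576500) == 576  # first triangle number with > 500 divisors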
import numpy as np


def snake_case(vector: np.array) -> np.array:
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
81
1
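The activation in the record above is just tanh written in its logistic form, since tanh(x) = 2 / (1 + e^(-2x)) - 1; numpy confirms the identity numerically:

import numpy as np

x = np.linspace(-3.0, 3.0, 13)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))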
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class lowercase ( A__ , A__ ): """simple docstring""" _a = 'resnet' _a = ['basic', 'bottleneck'] def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[256, 512, 1024, 2048] , UpperCamelCase_=[3, 4, 6, 3] , UpperCamelCase_="bottleneck" , UpperCamelCase_="relu" , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) UpperCamelCase__ :Optional[Any] = num_channels UpperCamelCase__ :int = embedding_size UpperCamelCase__ :Dict = hidden_sizes UpperCamelCase__ :int = depths UpperCamelCase__ :int = layer_type UpperCamelCase__ :List[Any] = hidden_act UpperCamelCase__ :Optional[int] = downsample_in_first_stage UpperCamelCase__ :Tuple = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )] UpperCamelCase__ , UpperCamelCase__ :Tuple = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names ) class lowercase ( A__ ): """simple docstring""" _a = version.parse('1.11' ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return 1e-3
97
'''simple docstring''' from scipy.stats import pearsonr import datasets __snake_case = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __snake_case = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __snake_case = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if return_pvalue: UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
97
1
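The metric above is a thin wrapper over `scipy.stats.pearsonr`; its own docstring example reproduces directly:

from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
assert round(r, 2) == -0.74
assert round(p, 2) == 0.15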
from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Dict = logging.get_logger(__name__) __A : Union[str, Any] = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class __A ( lowerCAmelCase ): lowerCAmelCase_ : List[Any] = "canine" def __init__( self : Tuple , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : str=3072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Dict=16384 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : List[str]=0xe_000 , UpperCAmelCase_ : Dict=0xe_001 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : List[Any]=8 , UpperCAmelCase_ : List[Any]=16384 , UpperCAmelCase_ : Optional[int]=128 , **UpperCAmelCase_ : List[Any] , ): super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Any = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : str = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : str = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : Any = type_vocab_size lowerCAmelCase : Tuple = layer_norm_eps # Character config: lowerCAmelCase : int = downsampling_rate lowerCAmelCase : Optional[int] = upsampling_kernel_size lowerCAmelCase : Optional[Any] = num_hash_functions lowerCAmelCase : Dict = num_hash_buckets lowerCAmelCase : Any = local_transformer_stride
323
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
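# A short usage sketch for hamming_distance above (inputs are illustrative):
# "karolin" and "kathrin" differ at exactly three positions (r/t, o/h, l/r).
print(hamming_distance("karolin", "kathrin"))  # 3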
323
1
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer _A : Any = logging.get_logger(__name__) _A : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Optional[Any] = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } _A : Union[str, Any] = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } _A : Optional[int] = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } _A : str = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } _A : Any = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } _A : Optional[int] = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } _A : List[Any] = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } _A : Tuple = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } _A : int = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class _lowercase ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[int] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : 
int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _lowercase ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : int = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION _A : List[str] = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) _A : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) _A : Union[str, Any] = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Returns: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase__ ) class _lowercase : '''simple docstring''' def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) elif titles is None or texts is None: __lowerCAmelCase = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCAmelCase = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles] __lowerCAmelCase = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts] __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ): raise ValueError( f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts.""" ) __lowerCAmelCase = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )["""input_ids"""] __lowerCAmelCase = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )["""input_ids"""] __lowerCAmelCase = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is 
not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ] } if return_attention_mask is not False: __lowerCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowerCAmelCase = attention_mask return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : BatchEncoding , SCREAMING_SNAKE_CASE__ : DPRReaderOutput , SCREAMING_SNAKE_CASE__ : int = 16 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 4 , ) -> List[DPRSpanPrediction]: __lowerCAmelCase = reader_input["""input_ids"""] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reader_output[:3] __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ ) __lowerCAmelCase = [] for doc_id in sorted_docs: __lowerCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowerCAmelCase = sequence_ids.index(self.pad_token_id ) else: __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ) -> List[DPRSpanPrediction]: __lowerCAmelCase = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowerCAmelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowerCAmelCase = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(SCREAMING_SNAKE_CASE__ ) == top_spans: break return 
chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Tuple = READER_PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Optional[int] = ["""input_ids""", """attention_mask"""]
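# A hedged end-to-end sketch of the reader tokenizer defined above, using the
# standard transformers DPR classes (this downloads pretrained weights; the
# question/passage strings are illustrative):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway."],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(best_spans[0].text)  # the highest-scoring answer span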
229
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a system of two linear equations, each given as [a, b, c] for ax + by = c."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
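# Worked example for cramers_rule_2x2 above (an illustrative system):
#   1x + 2y = 3
#   2x + 1y = 3
# determinant = 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3,
# determinant_y = 1*3 - 2*3 = -3, so x = -3/-3 = 1 and y = -3/-3 = 1.
print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)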
229
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
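# A short sketch of the rope_scaling contract enforced by
# _rope_scaling_validation above: a two-field dict whose "type" is "linear"
# or "dynamic" and whose "factor" is a float greater than 1 (values here are
# illustrative):
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}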
366
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image __A = ["text", "image", "audio"] def lowerCAmelCase_ ( __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =[] for input_type in input_types: if input_type == "text": inputs.append("Text input" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(__a , __a ): inputs.append(create_inputs(__a ) ) else: raise ValueError(F"""Invalid type requested: {input_type}""" ) return inputs def lowerCAmelCase_ ( __a ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__: Union[str, Any] =[] for output in outputs: if isinstance(__a , (str, AgentText) ): output_types.append("text" ) elif isinstance(__a , (Image.Image, AgentImage) ): output_types.append("image" ) elif isinstance(__a , (torch.Tensor, AgentAudio) ): output_types.append("audio" ) else: raise ValueError(F"""Invalid output: {output}""" ) return output_types @is_tool_test class _SCREAMING_SNAKE_CASE : '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict: '''simple docstring''' self.assertTrue(hasattr(self.tool , "inputs")) self.assertTrue(hasattr(self.tool , "outputs")) lowerCamelCase__: Tuple =self.tool.inputs for _input in inputs: if isinstance(_input , UpperCAmelCase_): for __input in _input: self.assertTrue(__input in authorized_types) else: self.assertTrue(_input in authorized_types) lowerCamelCase__: Optional[Any] =self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str: '''simple docstring''' lowerCamelCase__: List[str] =create_inputs(self.tool.inputs) lowerCamelCase__: str =self.tool(*UpperCAmelCase_) # There is a single output if len(self.tool.outputs) == 1: lowerCamelCase__: Optional[Any] =[outputs] self.assertListEqual(output_types(UpperCAmelCase_) , self.tool.outputs) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any: '''simple docstring''' self.assertTrue(hasattr(self.tool , "description")) self.assertTrue(hasattr(self.tool , "default_checkpoint")) self.assertTrue(self.tool.description.startswith("This is a tool that")) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: str =create_inputs(self.tool.inputs) lowerCamelCase__: Dict =self.tool(*UpperCAmelCase_) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Tuple =[outputs] self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs)) for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs): lowerCamelCase__: Any =AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_)) def SCREAMING_SNAKE_CASE_ (self : Dict) ->str: '''simple docstring''' lowerCamelCase__: Any =create_inputs(self.tool.inputs) lowerCamelCase__: int =[] for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs): if isinstance(UpperCAmelCase_ , UpperCAmelCase_): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not 
raise an error lowerCamelCase__: Union[str, Any] =self.tool(*UpperCAmelCase_) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: str =[outputs] self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
273
0
import comet # From: unbabel-comet import torch import datasets lowerCamelCase = datasets.logging.get_logger(__name__) lowerCamelCase = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://unbabel.github.io/COMET/html/index.html''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''sources''': datasets.Value('''string''', id='''sequence''' ), '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/Unbabel/COMET'''], reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ], ) def _UpperCAmelCase ( self, lowercase_ ) -> int: """simple docstring""" if self.config_name == "default": a__ =comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: a__ =comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_=None, lowercase_=False ) -> List[Any]: """simple docstring""" if gpus is None: a__ =1 if torch.cuda.is_available() else 0 a__ ={'''src''': sources, '''mt''': predictions, '''ref''': references} a__ =[dict(zip(lowercase_, lowercase_ ) ) for t in zip(*data.values() )] a__, a__ =self.scorer.predict(lowercase_, gpus=lowercase_, progress_bar=lowercase_ ) return {"mean_score": mean_score, "scores": scores}
188
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : List[Any] = IFInpaintingSuperResolutionPipeline lowerCamelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} ) lowerCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'latents'} def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self, lowercase_, lowercase_=0 ) -> Tuple: """simple docstring""" if str(lowercase_ ).startswith('''mps''' ): a__ =torch.manual_seed(lowercase_ ) else: a__ =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) a__ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def _UpperCAmelCase ( self ) -> List[str]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' ) def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self._test_save_load_local() def _UpperCAmelCase ( self ) -> str: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2, )
188
1
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class a__ : def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=9_9 , _A=3_2 , _A=4 , _A=4 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0.02 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = rotary_dim __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = initializer_range __lowerCAmelCase = None __lowerCAmelCase = vocab_size - 1 __lowerCAmelCase = vocab_size - 1 __lowerCAmelCase = vocab_size - 1 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = 2_0 __lowerCAmelCase = model_class_name(_A ) __lowerCAmelCase = model.init_cache(input_ids.shape[0] , _A ) __lowerCAmelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" ) __lowerCAmelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase = model( input_ids[:, :-1] , attention_mask=_A , past_key_values=_A , position_ids=_A , ) __lowerCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) __lowerCAmelCase = model( input_ids[:, -1:] , attention_mask=_A , past_key_values=outputs_cache.past_key_values , position_ids=_A , ) __lowerCAmelCase = model(_A ) __lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , 
msg=f"""Max diff is {diff}""" ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = 2_0 __lowerCAmelCase = model_class_name(_A ) __lowerCAmelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCAmelCase = model.init_cache(input_ids.shape[0] , _A ) __lowerCAmelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase = model( input_ids[:, :-1] , attention_mask=_A , past_key_values=_A , position_ids=_A , ) __lowerCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) __lowerCAmelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_A , position_ids=_A , ) __lowerCAmelCase = model(_A , attention_mask=_A ) __lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) @require_flax class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : str = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _a : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else () def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = FlaxGPTJModelTester(self ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(_A , _A , _A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( _A , _A , _A , _A ) @tooslow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" ) __lowerCAmelCase = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=_A , truncation=_A ) __lowerCAmelCase = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" ) __lowerCAmelCase = False __lowerCAmelCase = model.config.eos_token_id __lowerCAmelCase = jax.jit(model.generate ) __lowerCAmelCase = jit_generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCAmelCase = tokenizer.batch_decode(_A , skip_special_tokens=_A ) __lowerCAmelCase = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. 
I'm going to", ] self.assertListEqual(_A , _A ) @is_pt_flax_cross_test def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase = self._prepare_for_class(_A , _A ) __lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase = getattr(_A , _A ) __lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape __lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_A ): __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = pt_model_class(_A ).eval() __lowerCAmelCase = model_class(_A , dtype=jnp.floataa ) __lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A ) __lowerCAmelCase = fx_state with torch.no_grad(): __lowerCAmelCase = pt_model(**_A ).to_tuple() __lowerCAmelCase = fx_model(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(_A , _A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_A ) __lowerCAmelCase = model_class.from_pretrained(_A , from_pt=_A ) __lowerCAmelCase = fx_model_loaded(**_A ).to_tuple() self.assertEqual( len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(_A , _A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase = self._prepare_for_class(_A , _A ) __lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase = getattr(_A , _A ) __lowerCAmelCase = pt_model_class(_A ).eval() __lowerCAmelCase = model_class(_A , dtype=jnp.floataa ) __lowerCAmelCase = load_flax_weights_in_pytorch_model(_A , fx_model.params ) __lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape __lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_A ): __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 0 __lowerCAmelCase = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCAmelCase = pt_model(**_A ).to_tuple() __lowerCAmelCase = fx_model(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(_A , _A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_A ) __lowerCAmelCase = pt_model_class.from_pretrained(_A , from_flax=_A ) with torch.no_grad(): __lowerCAmelCase = pt_model_loaded(**_A ).to_tuple() self.assertEqual( len(_A ) , len(_A ) , "Output 
lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(_A , _A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_class_name in self.all_model_classes: __lowerCAmelCase = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" ) __lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_A )
102
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
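# Quick sanity checks for solution() above: the first prime is 2 and the
# sixth prime is 13 (2, 3, 5, 7, 11, 13).
print(solution(1))  # 2
print(solution(6))  # 13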
102
1
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: a_ : Optional[int] = 1 a_ : Union[str, Any] = 3 a_ : Union[str, Any] = (3_2, 3_2) a_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) return image @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: torch.manual_seed(0 ) a_ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) return model @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: torch.manual_seed(0 ) a_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: def extract(*SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ): class SCREAMING_SNAKE_CASE__ : def __init__( self : Union[str, Any] ) -> List[Any]: a_ : str = torch.ones([0] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str: self.pixel_values.to(SCREAMING_SNAKE_CASE__ ) return self return Out() return extract def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: a_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Optional[Any] = self.dummy_cond_unet a_ : List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) a_ : Optional[Any] = self.dummy_vae a_ : int = self.dummy_text_encoder a_ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk a_ : List[Any] = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = 'A painting of a squirrel eating a burger' a_ : 
str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : List[Any] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) a_ : str = output.images a_ : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Optional[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] a_ : str = image[0, -3:, -3:, -1] a_ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a_ : Dict = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.dummy_cond_unet a_ : List[Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) a_ : Any = self.dummy_vae a_ : Optional[Any] = self.dummy_text_encoder a_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk a_ : Any = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : int = 'A painting of a squirrel eating a burger' a_ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Any = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) a_ : Any = output.images a_ : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] a_ : Tuple = image[0, -3:, -3:, -1] a_ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a_ : Any = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE__ ) assert pipe.safety_checker is None a_ : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None a_ : str = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None 
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: a_ : Union[str, Any] = self.dummy_cond_unet a_ : Dict = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) a_ : Any = self.dummy_vae a_ : Tuple = self.dummy_text_encoder a_ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 a_ : Union[str, Any] = unet.half() a_ : Dict = vae.half() a_ : Tuple = bert.half() # make sure here that pndm scheduler skips prk a_ : Tuple = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'A painting of a squirrel eating a burger' a_ : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: a_ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a_ : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) a_ : Dict = 4_0_0_3_6_6_0_3_4_6 a_ : Union[str, Any] = 7 # without safety guidance (sld_guidance_scale = 0) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : str = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : List[str] = output.images a_ : Union[str, Any] = image[0, -3:, -3:, -1] a_ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) a_ : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : str = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : str = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : int = 
StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : int = 'padme amidala taking a bath artwork, safe for work, no nudity' a_ : Any = 2_7_3_4_9_7_1_7_5_5 a_ : str = 7 a_ : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : Optional[int] = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 a_ : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Any = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : Optional[Any] = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : Any = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> int: a_ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) a_ : Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) a_ : List[Any] = 1_0_4_4_3_5_5_2_3_4 a_ : int = 1_2 a_ : str = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : Dict = output.images a_ : Optional[int] = image[0, -3:, -3:, -1] a_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 a_ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : Tuple = output.images a_ : str = image[0, -3:, -3:, -1] a_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
32
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # Replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
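# Behavior sketch for shift_tokens_right above: tokens are shifted one step
# to the right, the decoder start token is prepended, and any -100 label
# padding is replaced by pad_token_id (values are illustrative; uses the
# module's jnp import):
example_ids = jnp.array([[5, -100, 7, 8]])
print(shift_tokens_right(example_ids, pad_token_id=0, decoder_start_token_id=2))
# [[2 5 0 7]]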
32
1
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Optional[Any] = checkpoint _a : List[Any] = {} _a : Tuple = vae_state_dict["""encoder.conv_in.weight"""] _a : str = vae_state_dict["""encoder.conv_in.bias"""] _a : Tuple = vae_state_dict["""encoder.conv_out.weight"""] _a : Dict = vae_state_dict["""encoder.conv_out.bias"""] _a : List[Any] = vae_state_dict["""encoder.norm_out.weight"""] _a : Dict = vae_state_dict["""encoder.norm_out.bias"""] _a : Tuple = vae_state_dict["""decoder.conv_in.weight"""] _a : Union[str, Any] = vae_state_dict["""decoder.conv_in.bias"""] _a : Any = vae_state_dict["""decoder.conv_out.weight"""] _a : Optional[Any] = vae_state_dict["""decoder.conv_out.bias"""] _a : Any = vae_state_dict["""decoder.norm_out.weight"""] _a : str = vae_state_dict["""decoder.norm_out.bias"""] _a : Any = vae_state_dict["""quant_conv.weight"""] _a : Optional[Any] = vae_state_dict["""quant_conv.bias"""] _a : Union[str, Any] = vae_state_dict["""post_quant_conv.weight"""] _a : Dict = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only _a : int = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) _a : Tuple = { layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ ) } # Retrieves the keys for the decoder up blocks only _a : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) _a : Optional[Any] = { layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ ) } for i in range(UpperCamelCase__ ): _a : str = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key] if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: _a : Any = vae_state_dict.pop( F"""encoder.down.{i}.downsample.conv.weight""" ) _a : Union[str, Any] = vae_state_dict.pop( F"""encoder.down.{i}.downsample.conv.bias""" ) _a : Any = renew_vae_resnet_paths(UpperCamelCase__ ) _a : Any = {"""old""": F"""down.{i}.block""", """new""": F"""down_blocks.{i}.resnets"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) _a : Dict = [key for key in vae_state_dict if """encoder.mid.block""" in key] _a : Optional[int] = 2 for i in range(1 , num_mid_res_blocks + 1 ): _a : str = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key] _a : Tuple = renew_vae_resnet_paths(UpperCamelCase__ ) _a : Any = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) _a : Optional[int] = [key for key in vae_state_dict if """encoder.mid.attn""" in key] _a : str = renew_vae_attention_paths(UpperCamelCase__ ) _a : List[Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , 
config=UpperCamelCase__ ) conv_attn_to_linear(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): _a : Union[str, Any] = num_up_blocks - 1 - i _a : Union[str, Any] = [ key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key ] if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: _a : Union[str, Any] = vae_state_dict[ F"""decoder.up.{block_id}.upsample.conv.weight""" ] _a : Optional[int] = vae_state_dict[ F"""decoder.up.{block_id}.upsample.conv.bias""" ] _a : int = renew_vae_resnet_paths(UpperCamelCase__ ) _a : int = {"""old""": F"""up.{block_id}.block""", """new""": F"""up_blocks.{i}.resnets"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) _a : int = [key for key in vae_state_dict if """decoder.mid.block""" in key] _a : List[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): _a : int = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key] _a : Dict = renew_vae_resnet_paths(UpperCamelCase__ ) _a : List[str] = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) _a : Any = [key for key in vae_state_dict if """decoder.mid.attn""" in key] _a : str = renew_vae_attention_paths(UpperCamelCase__ ) _a : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) conv_attn_to_linear(UpperCamelCase__ ) return new_checkpoint def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' # Only support V1 _a : List[Any] = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) _a : Dict = io.BytesIO(r.content ) _a : Dict = OmegaConf.load(UpperCamelCase__ ) _a : Union[str, Any] = 5_1_2 _a : Tuple = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open _a : str = {} with safe_open(UpperCamelCase__ , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): _a : List[Any] = f.get_tensor(UpperCamelCase__ ) else: _a : str = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ )["""state_dict"""] # Convert the VAE model. _a : List[Any] = create_vae_diffusers_config(UpperCamelCase__ , image_size=UpperCamelCase__ ) _a : Tuple = custom_convert_ldm_vae_checkpoint(UpperCamelCase__ , UpperCamelCase__ ) _a : Optional[Any] = AutoencoderKL(**UpperCamelCase__ ) vae.load_state_dict(UpperCamelCase__ ) vae.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') _snake_case = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
324
"""simple docstring""" import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
324
1
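For reference, `sigmoid_linear_unit` above is the sigmoid approximation of GELU. A minimal sketch, assuming only NumPy and the standard library, that checks it against the exact erf-based definition; the function names here are illustrative, not taken from the file above:

import math

import numpy as np


def exact_gelu(x: np.ndarray) -> np.ndarray:
    # GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))), with erf applied element-wise
    return 0.5 * x * (1 + np.vectorize(math.erf)(x / math.sqrt(2)))


def approx_gelu(x: np.ndarray) -> np.ndarray:
    # Same formula as sigmoid_linear_unit above: x * sigmoid(1.702 * x)
    return x / (1 + np.exp(-1.702 * x))


if __name__ == "__main__":
    xs = np.linspace(-4, 4, 801)
    # The approximation stays within roughly 0.02 of the exact GELU on this range
    print(np.max(np.abs(exact_gelu(xs) - approx_gelu(xs))))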
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase : str = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': ( 
CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' ) A, A, A, A : List[Any] = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: A : Any = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) A : List[Any] = config_class.from_json_file(snake_case__ ) A : int = True A : str = True print(F'Building TensorFlow model from configuration: {config}' ) A : List[str] = model_class(snake_case__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): A : Tuple = cached_file( snake_case__ , snake_case__ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: A : List[str] = load_pytorch_checkpoint_in_tfa_model(snake_case__ , snake_case__ ) if compare_with_pt_model: A : Dict = tf_model(tf_model.dummy_inputs , training=snake_case__ ) # build the network A : List[Any] = torch.load(snake_case__ , map_location='''cpu''' ) A : Dict = pt_model_class.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) with torch.no_grad(): A : Union[str, Any] = pt_model(**pt_model.dummy_inputs ) A : Tuple = pto[0].numpy() A : List[str] = tfo[0].numpy() A : Tuple = np.amax(np.abs(np_pt - np_tf ) ) print(F'Max absolute difference between models outputs {diff}' ) assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}' # Save pytorch-model print(F'Save TensorFlow model to {tf_dump_path}' ) tf_model.save_weights(snake_case__ , save_format='''h5''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=False , ): '''simple docstring''' if args_model_type is None: A : int = list(MODEL_CLASSES.keys() ) else: A : Any = [args_model_type] for j, model_type in enumerate(snake_case__ , start=1 ): print('''=''' * 100 ) print(F' Converting model type {j}/{len(snake_case__ )}: {model_type}' ) print('''=''' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' 
) A, A, A, A, A : Optional[int] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: A : Any = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: A : List[Any] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(snake_case__ , snake_case__ ) , start=1 ): print('''-''' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F' Skipping finetuned checkpoint {model_shortcut_name}' ) continue A : Tuple = model_shortcut_name elif only_convert_finetuned_models: print(F' Skipping not finetuned checkpoint {model_shortcut_name}' ) continue print( F' Converting checkpoint {i}/{len(snake_case__ )}: {model_shortcut_name} - model_type {model_type}' ) print('''-''' * 100 ) if config_shortcut_name in aws_config_map: A : int = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) else: A : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: A : Optional[int] = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) else: A : List[Any] = model_shortcut_name if os.path.isfile(snake_case__ ): A : Any = '''converted_model''' convert_pt_checkpoint_to_tf( model_type=snake_case__ , pytorch_checkpoint_path=snake_case__ , config_file=snake_case__ , tf_dump_path=os.path.join(snake_case__ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=snake_case__ , ) if remove_cached_files: os.remove(snake_case__ ) os.remove(snake_case__ ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' 
) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') lowercase : Optional[Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
3
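The conversion script above validates a port by comparing the two frameworks' outputs. A tiny sketch of that numeric parity check in isolation; the random arrays stand in for `pt_outputs[0].numpy()` and `tf_outputs[0].numpy()`, and the seeds and shapes are arbitrary:

import numpy as np

# Stand-ins for the PyTorch and TensorFlow logits compared in the script above
np_pt = np.random.RandomState(0).randn(2, 8).astype(np.float32)
np_tf = np_pt + 1e-4 * np.random.RandomState(1).randn(2, 8).astype(np.float32)

diff = np.amax(np.abs(np_pt - np_tf))
print(f"Max absolute difference between models outputs {diff}")
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"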
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
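The `rename_key_and_reshape_tensor` logic above not only renames keys but also changes weight layouts. A small, shape-only sketch of the two transpose rules it applies, using placeholder zero tensors rather than a real checkpoint:

import numpy as np

# PyTorch Conv2d stores (out_channels, in_channels, kh, kw); Flax expects
# (kh, kw, in, out), hence the transpose(2, 3, 1, 0) in the converter above.
conv_weight = np.zeros((64, 3, 3, 3))
flax_conv_kernel = conv_weight.transpose(2, 3, 1, 0)
print(flax_conv_kernel.shape)  # (3, 3, 3, 64)

# PyTorch Linear stores (out_features, in_features); a Flax Dense kernel is
# (in, out), hence the plain transpose for linear layers.
linear_weight = np.zeros((1024, 768))
flax_dense_kernel = linear_weight.T
print(flax_dense_kernel.shape)  # (768, 1024)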
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) snake_case_ : List[Any] = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } snake_case_ : Dict = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } snake_case_ : List[Any] = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } snake_case_ : List[Any] = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } snake_case_ : Any = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } snake_case_ : Tuple = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def A (__A : List[Any] ) -> Any: """simple docstring""" if isinstance(__A , __A ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def A (__A : Any , __A : Union[str, Any] , __A : Optional[int] , __A : Tuple , __A : Tuple=False ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] if has_skip: UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def A (__A : Optional[int] , __A : Optional[int] , __A : Union[str, Any] , __A : Dict , __A : str=None ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""] UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def A (__A : str , __A : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' ) UpperCAmelCase_ = {} UpperCAmelCase_ = checkpoint['''time_embed.0.weight'''] UpperCAmelCase_ = checkpoint['''time_embed.0.bias'''] UpperCAmelCase_ = checkpoint['''time_embed.2.weight'''] UpperCAmelCase_ = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: UpperCAmelCase_ = checkpoint['''label_emb.weight'''] UpperCAmelCase_ = checkpoint['''input_blocks.0.0.weight'''] UpperCAmelCase_ = checkpoint['''input_blocks.0.0.bias'''] UpperCAmelCase_ = unet_config['''down_block_types'''] UpperCAmelCase_ = unet_config['''layers_per_block'''] UpperCAmelCase_ = unet_config['''attention_head_dim'''] UpperCAmelCase_ = unet_config['''block_out_channels'''] UpperCAmelCase_ = 1 UpperCAmelCase_ = channels_list[0] for i, layer_type in enumerate(__A ): UpperCAmelCase_ = channels_list[i] UpperCAmelCase_ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__A ): UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__A ): UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A ) UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.1""" UpperCAmelCase_ = convert_attention( __A , __A , __A , __A , __A ) current_layer += 1 if i != len(__A ) - 1: UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A ) current_layer += 1 UpperCAmelCase_ = current_channels # hardcoded the mid-block for now UpperCAmelCase_ = '''mid_block.resnets.0''' UpperCAmelCase_ = '''middle_block.0''' UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A ) UpperCAmelCase_ = '''mid_block.attentions.0''' UpperCAmelCase_ = '''middle_block.1''' UpperCAmelCase_ = convert_attention(__A , __A , __A , __A , __A ) UpperCAmelCase_ = '''mid_block.resnets.1''' UpperCAmelCase_ = '''middle_block.2''' UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A ) UpperCAmelCase_ = 0 UpperCAmelCase_ = unet_config['''up_block_types'''] for i, layer_type in enumerate(__A ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}""" 
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A ) current_layer += 1 if i != len(__A ) - 1: UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1""" UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""output_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A ) UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}""" UpperCAmelCase_ = F"""output_blocks.{current_layer}.1""" UpperCAmelCase_ = convert_attention( __A , __A , __A , __A , __A ) current_layer += 1 if i != len(__A ) - 1: UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2""" UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A ) UpperCAmelCase_ = checkpoint['''out.0.weight'''] UpperCAmelCase_ = checkpoint['''out.0.bias'''] UpperCAmelCase_ = checkpoint['''out.2.weight'''] UpperCAmelCase_ = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": snake_case_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." ) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") snake_case_ : Any = parser.parse_args() snake_case_ : Optional[int] = strabool(args.class_cond) snake_case_ : Optional[Any] = os.path.basename(args.unet_path) print(f"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: snake_case_ : Tuple = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case_ : List[str] = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: snake_case_ : str = TEST_UNET_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: snake_case_ : List[str] = None snake_case_ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config) snake_case_ : Dict = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: snake_case_ : Tuple = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: snake_case_ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case_ : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") snake_case_ : List[Any] = CMStochasticIterativeScheduler(**scheduler_config) snake_case_ : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
7
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual question answering pipeline: takes an image and a question about it and returns the
    model's highest-scoring answers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the first argument is already formatted as {"image": ..., "question": ...}
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            # Treat the task as multi-label classification over the answer vocabulary
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
7
1
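A minimal usage sketch for the pipeline class above; the checkpoint id is one public example and the image path is a placeholder, neither taken from this file:

from transformers import pipeline

# The "visual-question-answering" task routes to VisualQuestionAnsweringPipeline above
vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(image="cats.png", question="How many cats are there?", top_k=2)
# -> a list of {"score": ..., "answer": ...} dicts, as assembled in postprocess()
print(answers)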
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
346
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
346
1
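The scheduler tests in the row above repeatedly exercise one pattern: save a PNDM scheduler's config, reload it, and check that step_prk/step_plms produce identical samples. A minimal standalone sketch of that round-trip, assuming the public diffusers PNDMScheduler API; the tensor shape and step count are arbitrary choices of mine:

import tempfile

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = PNDMScheduler.from_pretrained(tmpdir)
    reloaded.set_timesteps(10)

sample = torch.ones(1, 3, 8, 8)  # arbitrary shape
residual = 0.1 * sample
t = scheduler.prk_timesteps[0]
# Both schedulers start from identical internal state, so one PRK step must agree.
out_a = scheduler.step_prk(residual, t, sample).prev_sample
out_b = reloaded.step_prk(residual, t, sample).prev_sample
assert torch.sum(torch.abs(out_a - out_b)) < 1e-5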
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_=99 , A_=13 , A_=16 , A_=7 , A_=True , A_=True , A_=True , A_=False , A_=True , A_=2 , A_=32 , A_=4 , A_=4 , A_=30 , A_=0 , A_=1 , A_=2 , A_=None , ) -> List[str]: __UpperCamelCase =parent __UpperCamelCase =batch_size __UpperCamelCase =decoder_seq_length # For common tests __UpperCamelCase =self.decoder_seq_length __UpperCamelCase =is_training __UpperCamelCase =use_attention_mask __UpperCamelCase =use_labels __UpperCamelCase =vocab_size __UpperCamelCase =d_model __UpperCamelCase =d_model __UpperCamelCase =decoder_layers __UpperCamelCase =decoder_layers __UpperCamelCase =decoder_ffn_dim __UpperCamelCase =decoder_attention_heads __UpperCamelCase =decoder_attention_heads __UpperCamelCase =eos_token_id __UpperCamelCase =bos_token_id __UpperCamelCase =pad_token_id __UpperCamelCase =decoder_start_token_id __UpperCamelCase =use_cache __UpperCamelCase =max_position_embeddings __UpperCamelCase =None __UpperCamelCase =decoder_seq_length __UpperCamelCase =2 __UpperCamelCase =1 def _a ( self ) -> List[Any]: __UpperCamelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase =None if self.use_attention_mask: __UpperCamelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __UpperCamelCase =None if self.use_labels: __UpperCamelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _a ( self , A_ , A_ , A_ , A_ , ) -> Dict: __UpperCamelCase =True __UpperCamelCase =TrOCRDecoder(config=A_ ).to(A_ ).eval() __UpperCamelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __UpperCamelCase =model(A_ , use_cache=A_ ) __UpperCamelCase =model(A_ ) __UpperCamelCase =model(A_ , use_cache=A_ ) self.parent.assertTrue(len(A_ ) == len(A_ ) ) self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 ) __UpperCamelCase =outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids __UpperCamelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and __UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase =model(A_ )['last_hidden_state'] __UpperCamelCase =model(A_ , past_key_values=A_ )['last_hidden_state'] # select random slice __UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __UpperCamelCase =output_from_past[:, 0, random_slice_idx].detach() # test that 
outputs are equal for slice assert torch.allclose(A_ , A_ , atol=1E-3 ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase =self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs __UpperCamelCase ={'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A_ , A_ , A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () UpperCAmelCase__ : str = (TrOCRForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : Dict = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : Tuple = False def _a ( self ) -> Optional[Any]: __UpperCamelCase =TrOCRStandaloneDecoderModelTester(self , is_training=A_ ) __UpperCamelCase =ConfigTester(self , config_class=A_ ) def _a ( self ) -> List[str]: pass def _a ( self ) -> List[Any]: pass def _a ( self ) -> Union[str, Any]: pass def _a ( self ) -> Tuple: self.config_tester.run_common_tests() def _a ( self ) -> Optional[int]: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*A_ ) def _a ( self ) -> int: return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def _a ( self ) -> List[Any]: pass
353
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _A = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : Optional[str] = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The column name of the images in the files."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the training data."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the validation data."} ) UpperCAmelCase__ : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _a ( self ) -> Optional[int]: __UpperCamelCase ={} if self.train_dir is not None: __UpperCamelCase =self.train_dir if self.validation_dir is not None: __UpperCamelCase =self.validation_dir __UpperCamelCase =data_files if data_files else None @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : str = field( default=A_ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) UpperCAmelCase__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) UpperCAmelCase__ : float = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : float = field( default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __UpperCamelCase =training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
__UpperCamelCase =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCamelCase =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. __UpperCamelCase =load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __UpperCamelCase =None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0: __UpperCamelCase =ds['train'].train_test_split(data_args.train_val_split ) __UpperCamelCase =split['train'] __UpperCamelCase =split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCamelCase ={ 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTImageProcessor() # create model if model_args.model_name_or_path: __UpperCamelCase =ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) __UpperCamelCase =ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) if training_args.do_train: __UpperCamelCase =ds['train'].column_names else: __UpperCamelCase =ds['validation'].column_names if data_args.image_column_name is not None: __UpperCamelCase =data_args.image_column_name elif "image" in column_names: __UpperCamelCase ='image' elif "img" in column_names: __UpperCamelCase ='img' else: __UpperCamelCase =column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __UpperCamelCase =image_processor.size['shortest_edge'] else: __UpperCamelCase =(image_processor.size['height'], image_processor.size['width']) __UpperCamelCase =Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =[transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __UpperCamelCase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __UpperCamelCase =( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ ) # Compute absolute learning rate __UpperCamelCase =( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __UpperCamelCase =training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer __UpperCamelCase =Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds['train'] if training_args.do_train else None , 
eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: __UpperCamelCase =None if training_args.resume_from_checkpoint is not None: __UpperCamelCase =training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCamelCase =last_checkpoint __UpperCamelCase =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __UpperCamelCase =trainer.evaluate() trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub __UpperCamelCase ={ 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
117
0
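One detail worth calling out in the MAE pretraining script above is the linear learning-rate scaling rule, absolute_lr = base_lr * total_batch_size / 256. A small illustration of how the effective batch size feeds into it; the helper name and the numbers are my own:

def absolute_lr(base_lr: float, per_device_batch: int, grad_accum_steps: int, world_size: int) -> float:
    # Mirrors the total_train_batch_size product computed in the script.
    total_batch_size = per_device_batch * grad_accum_steps * world_size
    return base_lr * total_batch_size / 256


assert absolute_lr(1e-3, 32, 2, 4) == 1e-3  # effective batch 256 keeps the base rate
assert absolute_lr(1e-3, 64, 2, 4) == 2e-3  # doubling the batch doubles the rate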
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) __lowerCAmelCase : Tuple =logging.getLogger(__name__) @dataclass class UpperCAmelCase : __lowercase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowercase = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowercase = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowercase = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """Whether tp freeze the encoder."""} ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class UpperCAmelCase : __lowercase = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) __lowercase = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) __lowercase = field( default=1024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowercase = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowercase = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) __lowercase = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowercase = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) __lowercase = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) __lowercase = field(default=-1 , metadata={"""help""": """# test examples. 
-1 means use all."""} ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """Source language id for translation."""} ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """Target language id for translation."""} ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """# num_beams to use for evaluation."""} ) __lowercase = field( default=UpperCamelCase__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] ): logger.info(F"***** {split} metrics *****" ) for key in sorted(metrics.keys() ): logger.info(F" {key} = {metrics[key]}" ) save_json(_lowerCamelCase , os.path.join(_lowerCamelCase , F"{split}_results.json" ) ) def UpperCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__, A__, A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__, A__, A__ = parser.parse_args_into_dataclasses() check_output_dir(_lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): assert hasattr(_lowerCamelCase , _lowerCamelCase ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(_lowerCamelCase , _lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCamelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_lowerCamelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: A__ = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_lowerCamelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_lowerCamelCase , _lowerCamelCase ): A__ = tokenizer.lang_code_to_id[data_args.tgt_lang] else: A__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_lowerCamelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) A__ = SeqaSeqDataset # Get datasets A__ = ( dataset_class( _lowerCamelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) A__ = ( dataset_class( _lowerCamelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) A__ = ( dataset_class( _lowerCamelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer A__ = ( build_compute_metrics_fn(data_args.task , _lowerCamelCase ) if training_args.predict_with_generate else None ) A__ = SeqaSeqTrainer( model=_lowerCamelCase , args=_lowerCamelCase , data_args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , data_collator=SeqaSeqDataCollator( _lowerCamelCase , _lowerCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , ) A__ = {} # Training if training_args.do_train: logger.info("*** Train ***" ) A__ = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) A__ = train_result.metrics A__ = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , _lowerCamelCase , 
training_args.output_dir ) all_metrics.update(_lowerCamelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) A__ = trainer.evaluate(metric_key_prefix="val" ) A__ = data_args.n_val A__ = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , _lowerCamelCase , training_args.output_dir ) all_metrics.update(_lowerCamelCase ) if training_args.do_predict: logger.info("*** Predict ***" ) A__ = trainer.predict(test_dataset=_lowerCamelCase , metric_key_prefix="test" ) A__ = test_output.metrics A__ = data_args.n_test if trainer.is_world_process_zero(): A__ = round(metrics["test_loss"] , 4 ) handle_metrics("test" , _lowerCamelCase , training_args.output_dir ) all_metrics.update(_lowerCamelCase ) if training_args.predict_with_generate: A__ = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) A__ = lmap(str.strip , _lowerCamelCase ) write_txt_file(_lowerCamelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(_lowerCamelCase , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def UpperCamelCase ( _lowerCamelCase : int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
237
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    number_of_frac_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * (10**number_of_frac_digits))
    denominator = 10**number_of_frac_digits
    # Reduce the fraction with Euclid's algorithm for the greatest common divisor.
    dividend, divisor = denominator, numerator
    while True:
        remainder = dividend % divisor
        if remainder == 0:
            break
        dividend, divisor = divisor, remainder
    numerator, denominator = numerator // divisor, denominator // divisor
    return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError("Please enter a valid number")
237
1
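As a quick sanity check on the decimal-to-fraction conversion above, the result can be cross-checked against Python's standard fractions module. The harness below is my own and assumes the corrected function:

from fractions import Fraction

for value in (2, 89.0, "67", "45.0", 1.5, "6.25"):
    num, den = decimal_to_fraction(value)
    assert Fraction(num, den) == Fraction(str(value))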
"""simple docstring""" import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _lowercase , unittest.TestCase): snake_case__ = CLIPTokenizer snake_case__ = CLIPTokenizerFast snake_case__ = True snake_case__ = {} snake_case__ = False def _UpperCamelCase ( self : Tuple ) -> Optional[Any]: super().setUp() # fmt: off _UpperCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] _UpperCamelCase = {'''unk_token''': '''<unk>'''} _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def _UpperCamelCase ( self : Tuple , **__UpperCamelCase : str ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def _UpperCamelCase ( self : List[Any] , **__UpperCamelCase : Union[str, Any] ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def _UpperCamelCase ( self : str , __UpperCamelCase : str ) -> int: _UpperCamelCase = '''lower newer''' _UpperCamelCase = '''lower newer''' return input_text, output_text def _UpperCamelCase ( self : int ) -> Union[str, Any]: _UpperCamelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] _UpperCamelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCamelCase = tokens + [tokenizer.unk_token] _UpperCamelCase = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) @require_ftfy def _UpperCamelCase ( self : int ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCamelCase = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' _UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase ) _UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways _UpperCamelCase = '''xa\u0303y''' + ''' 
''' + '''x\xe3y''' _UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase ) _UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Test that the tokenization is identical on unicode of space type _UpperCamelCase = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: _UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase ) _UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Test that the tokenization is identical on unicode of line break type _UpperCamelCase = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: _UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase ) _UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : Dict ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` _UpperCamelCase = F'''{text_of_1_token} {text_of_1_token}''' _UpperCamelCase = self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , ) _UpperCamelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) _UpperCamelCase = F''' {text}''' _UpperCamelCase = self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , ) _UpperCamelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ) + 1, 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) def _UpperCamelCase ( self : Dict ) -> List[str]: # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(__UpperCamelCase ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def _UpperCamelCase ( self : List[Any] ) -> Tuple: super().test_tokenization_python_rust_equals() def _UpperCamelCase ( self : Any ) -> Optional[Any]: # CLIP always lower cases letters pass
366
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : str=13 , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=True , __UpperCamelCase : str=99 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : str=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=512 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : List[Any]=4 , ) -> Optional[int]: _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_choices def _UpperCamelCase ( self : Optional[int] ) -> List[Any]: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_attention_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCamelCase ( self : List[Any] ) -> Any: _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _lowercase , unittest.TestCase): snake_case__ = True snake_case__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, 
FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCamelCase ( self : Optional[int] ) -> Dict: _UpperCamelCase = FlaxRoFormerModelTester(self ) @slow def _UpperCamelCase ( self : Tuple ) -> List[Any]: for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__UpperCamelCase ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCamelCase ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCamelCase ( self : Dict ) -> int: _UpperCamelCase = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) _UpperCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) _UpperCamelCase = model(__UpperCamelCase )[0] _UpperCamelCase = 5_0000 _UpperCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , __UpperCamelCase ) _UpperCamelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
54
0
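The CLIP tokenizer test earlier in this row builds a toy vocabulary with the merge list ['l o', 'lo w</w>', 'e r</w>'] and expects 'lower newer' to split into ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']. A simplified byte-pair-encoding merge loop that reproduces this expectation; the helper name is mine, not the library's, and it merges one pair per iteration rather than all occurrences at once:

def bpe_tokenize(word: str, merges: list[tuple[str, str]]) -> list[str]:
    ranks = {pair: i for i, pair in enumerate(merges)}
    # Split into characters, marking the end of the word with </w>.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        # Pick the lowest-ranked (earliest-learned) merge that applies.
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols


merges = [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]
assert bpe_tokenize("lower", merges) == ["lo", "w", "er</w>"]
assert bpe_tokenize("newer", merges) == ["n", "e", "w", "er</w>"]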
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even-indexed and odd-indexed comparison passes.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
154
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new phrases curr_string + "0"/"1"; widen existing codes when the lexicon doubles."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit string with a Lempel-Ziv-style dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Pad a trailing partial phrase with zeros until it matches a known code.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        result += lexicon[curr_string]
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length (Elias-gamma style) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string to file, byte-aligned with a 1-then-zeros terminator."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
303
0
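A quick randomized agreement check for the odd-even transposition sort above, comparing it against Python's built-in sorted; the harness is my own and assumes the corrected swap:

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert odd_even_transposition(list(data)) == sorted(data)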
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum obtainable from a non-empty subsequence of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the running best, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
70
from math import pi, sqrt


def gamma(num: float) -> float:
    """Euler's gamma function, restricted to positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:  # guard so that entering 0 exits instead of raising
            print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
70
1
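The recursive gamma above only covers positive integers and half-integers, so it can be spot-checked against math.gamma on exactly those inputs; the test values are my own:

import math

for x in (0.5, 1, 1.5, 2, 3, 4.5):
    assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-9)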
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
137
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
224
0
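In MobileNet-style models, depth_multiplier scales every layer's channel count and min_depth puts a floor under the result. The config above only stores these values; the sketch below shows how they typically interact downstream, though the exact rounding rule in the modeling code may differ:

def scaled_channels(channels: int, depth_multiplier: float, min_depth: int = 8) -> int:
    return max(int(channels * depth_multiplier), min_depth)


assert scaled_channels(64, 0.75) == 48
assert scaled_channels(8, 0.25) == 8  # clamped by min_depth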
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the issuer prefix of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate length, prefix and Luhn checksum of the given credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
19
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class a_ ( a__ ):
    """simple docstring"""

    def __init__( self , *_lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ) ->int:
        super().__init__(*_lowerCamelCase , **_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = eval_examples
        SCREAMING_SNAKE_CASE : Optional[int] = post_process_function

    def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = "eval" , **_lowerCamelCase , ) ->Dict[str, float]:
        SCREAMING_SNAKE_CASE : Any = gen_kwargs.copy()
        SCREAMING_SNAKE_CASE : str = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
        )
        SCREAMING_SNAKE_CASE : Dict = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
        )
        SCREAMING_SNAKE_CASE : Any = gen_kwargs

        SCREAMING_SNAKE_CASE : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
        SCREAMING_SNAKE_CASE : str = self.get_eval_dataloader(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        SCREAMING_SNAKE_CASE : Optional[Any] = self.compute_metrics
        SCREAMING_SNAKE_CASE : str = None
        SCREAMING_SNAKE_CASE : Optional[Any] = time.time()
        SCREAMING_SNAKE_CASE : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            SCREAMING_SNAKE_CASE : Tuple = eval_loop(
                _lowerCamelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
        finally:
            SCREAMING_SNAKE_CASE : Dict = compute_metrics

        SCREAMING_SNAKE_CASE : Tuple = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics(_lowerCamelCase )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    SCREAMING_SNAKE_CASE : Optional[int] = metrics.pop(_lowerCamelCase )
            metrics.update(output.metrics )
        else:
            SCREAMING_SNAKE_CASE : List[Any] = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(_lowerCamelCase )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        SCREAMING_SNAKE_CASE : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase )
        return metrics

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase = "test" , **_lowerCamelCase ) ->int:
        SCREAMING_SNAKE_CASE : str = gen_kwargs.copy()

        SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(_lowerCamelCase )

        # Temporarily disable metric computation, we will do it in the loop here.
        SCREAMING_SNAKE_CASE : Dict = self.compute_metrics
        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : List[str] = time.time()
        SCREAMING_SNAKE_CASE : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            SCREAMING_SNAKE_CASE : Any = eval_loop(
                _lowerCamelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
        finally:
            SCREAMING_SNAKE_CASE : Optional[int] = compute_metrics

        SCREAMING_SNAKE_CASE : List[Any] = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , '''predict''' )
        SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(_lowerCamelCase )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                SCREAMING_SNAKE_CASE : List[Any] = metrics.pop(_lowerCamelCase )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
19
1
def A_ ( _UpperCAmelCase ):
    SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
    SCREAMING_SNAKE_CASE_: Any = len(_UpperCAmelCase )
    for i in range(n - 1 ):
        for j in range(i + 1 , _UpperCAmelCase ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def A_ ( _UpperCAmelCase ):
    if len(_UpperCAmelCase ) <= 1:
        return arr, 0
    SCREAMING_SNAKE_CASE_: str = len(_UpperCAmelCase ) // 2
    SCREAMING_SNAKE_CASE_: List[str] = arr[0:mid]
    SCREAMING_SNAKE_CASE_: Dict = arr[mid:]
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = count_inversions_recursive(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = count_inversions_recursive(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = _count_cross_inversions(_UpperCAmelCase , _UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: List[str] = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
    SCREAMING_SNAKE_CASE_: Dict = []
    SCREAMING_SNAKE_CASE_: List[Any] = 0
    while i < len(_UpperCAmelCase ) and j < len(_UpperCAmelCase ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(_UpperCAmelCase ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1

    if i < len(_UpperCAmelCase ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )

    return r, num_inversion


def A_ ( ):
    SCREAMING_SNAKE_CASE_: Union[str, Any] = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    SCREAMING_SNAKE_CASE_: Optional[int] = count_inversions_bf(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = count_inversions_recursive(_UpperCAmelCase )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = " , _UpperCAmelCase )

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    SCREAMING_SNAKE_CASE_: str = count_inversions_bf(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = count_inversions_recursive(_UpperCAmelCase )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , _UpperCAmelCase )

    # an empty list should also have zero inversions
    SCREAMING_SNAKE_CASE_: List[Any] = []
    SCREAMING_SNAKE_CASE_: Union[str, Any] = count_inversions_bf(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = count_inversions_recursive(_UpperCAmelCase )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , _UpperCAmelCase )


if __name__ == "__main__":
    main()
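The merge-count step is the heart of the O(n log n) version above: when p[i] > q[j] and both halves are sorted, every remaining element of p also exceeds q[j], so len(p) - i inversions are counted at once. A self-contained sketch of just that step (illustrative names, not the module's obfuscated API):

def cross_inversions(p: list, q: list) -> int:
    # counts pairs (x, y) with x in p, y in q, x > y; both inputs must be sorted
    i = j = inv = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            inv += len(p) - i  # p[i:] all exceed q[j]
            j += 1
        else:
            i += 1
    return inv


assert cross_inversions([2, 5, 10], [1, 2, 11]) == 5  # (2,1) (5,1) (5,2) (10,1) (10,2)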
13
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
13
1
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 0 ):
    """simple docstring"""
    UpperCamelCase : Optional[Any] = length or len(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : Any = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            UpperCamelCase , UpperCamelCase : int = list_data[i + 1], list_data[i]
            UpperCamelCase : int = True

    return list_data if not swapped else bubble_sort(SCREAMING_SNAKE_CASE_ , length - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
315
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
    """simple docstring"""
    UpperCamelCase : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(SCREAMING_SNAKE_CASE_ ):
            if len(SCREAMING_SNAKE_CASE_ ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(SCREAMING_SNAKE_CASE_ ) )
    return data_lists


def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
    """simple docstring"""
    UpperCamelCase : list[list[float]] = []
    for dlist, weight in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Tuple = max(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            UpperCamelCase : Dict = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(SCREAMING_SNAKE_CASE_ )

        score_lists.append(SCREAMING_SNAKE_CASE_ )

    return score_lists


def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
    """simple docstring"""
    UpperCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : str = final_scores[j] + ele
    return final_scores


def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
    """simple docstring"""
    UpperCamelCase : str = get_data(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : List[str] = calculate_each_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : List[str] = generate_final_scores(SCREAMING_SNAKE_CASE_ )
    # append scores to source data
    for i, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
        source_data[i].append(SCREAMING_SNAKE_CASE_ )
    return source_data
315
1
"""simple docstring""" import math def __magic_name__ ( __snake_case : int ) -> str: lowercase : Optional[int] = 0 lowercase : Optional[int] = 0 while num > 0: lowercase : Dict = num % 8 lowercase : List[Any] = octal + (remainder * math.floor(math.pow(10 , __snake_case ) )) counter += 1 lowercase : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return f"""0o{int(__snake_case )}""" def __magic_name__ ( ) -> None: print("\n2 in octal is:" ) print(decimal_to_octal(2 ) ) # = 2 print("\n8 in octal is:" ) print(decimal_to_octal(8 ) ) # = 10 print("\n65 in octal is:" ) print(decimal_to_octal(65 ) ) # = 101 print("\n216 in octal is:" ) print(decimal_to_octal(216 ) ) # = 330 print("\n512 in octal is:" ) print(decimal_to_octal(512 ) ) # = 1000 print("\n" ) if __name__ == "__main__": main()
202
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _A : Optional[int] = logging.getLogger(__name__) class a__ ( a_ ): def __init__( self , _a=-1 ): # in NER datasets, the last column is usually reserved for NER label lowercase : List[str] = label_idx def __magic_name__ ( self , _a , _a ): if isinstance(_a , _a ): lowercase : Optional[Any] = mode.value lowercase : List[str] = os.path.join(_a , f"""{mode}.txt""" ) lowercase : str = 1 lowercase : Optional[int] = [] with open(_a , encoding="utf-8" ) as f: lowercase : List[Any] = [] lowercase : Optional[int] = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) guid_index += 1 lowercase : int = [] lowercase : int = [] else: lowercase : Optional[Any] = line.split(" " ) words.append(splits[0] ) if len(_a ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) return examples def __magic_name__ ( self , _a , _a , _a ): lowercase : List[str] = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(_a ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase : Any = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(_a ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: lowercase : Optional[Any] = f.read().splitlines() if "O" not in labels: lowercase : List[Any] = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( a_ ): def __init__( self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: lowercase : Tuple = f.read().splitlines() if "O" not in labels: lowercase : Optional[int] = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( a_ ): def __magic_name__ ( self , _a , _a ): if isinstance(_a , _a ): lowercase : List[Any] = mode.value lowercase : Optional[int] = os.path.join(_a , f"""{mode}.txt""" ) lowercase : Tuple = 1 lowercase : List[str] = [] with open(_a , encoding="utf-8" ) as f: for sentence in parse_incr(_a ): lowercase : Optional[Any] = [] lowercase : str = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(_a ) == len(_a ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) guid_index += 1 return examples def __magic_name__ ( self , _a , _a , _a ): lowercase : str = 0 for sentence in parse_incr(_a ): lowercase : List[Any] = preds_list[example_id] lowercase : List[str] = "" for token in sentence: out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(_a ) example_id += 1 def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: return f.read().splitlines() else: 
return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
202
1
"""simple docstring""" import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __UpperCamelCase = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = '''cpu''' __UpperCamelCase = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' __UpperCamelCase = '''path-to-your-trained-model''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __UpperCamelCase = pipe.to(device) # to channels last __UpperCamelCase = pipe.unet.to(memory_format=torch.channels_last) __UpperCamelCase = pipe.vae.to(memory_format=torch.channels_last) __UpperCamelCase = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __UpperCamelCase = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __UpperCamelCase = torch.randn(2, 4, 64, 64) __UpperCamelCase = torch.rand(1) * 999 __UpperCamelCase = torch.randn(2, 77, 768) __UpperCamelCase = (sample, timestep, encoder_hidden_status) try: __UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __UpperCamelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __UpperCamelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __UpperCamelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __UpperCamelCase = 666 __UpperCamelCase = torch.Generator(device).manual_seed(seed) __UpperCamelCase = {'''generator''': generator} if args.steps is not None: __UpperCamelCase = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __UpperCamelCase = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = '''canine'''

    def __init__( self : Any , lowerCamelCase_ : Optional[int]=7_68 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[str]=1_63_84 , lowerCamelCase_ : List[str]=16 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[Any]=1e-12 , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : List[Any]=0Xe_000 , lowerCamelCase_ : str=0Xe_001 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Union[str, Any]=8 , lowerCamelCase_ : Any=1_63_84 , lowerCamelCase_ : Dict=1_28 , **lowerCamelCase_ : Any , ):
        '''simple docstring'''
        super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
        SCREAMING_SNAKE_CASE : List[Any] = hidden_size
        SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE : int = num_attention_heads
        SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
        SCREAMING_SNAKE_CASE : Tuple = hidden_act
        SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
        SCREAMING_SNAKE_CASE : str = type_vocab_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps

        # Character config:
        SCREAMING_SNAKE_CASE : Optional[Any] = downsampling_rate
        SCREAMING_SNAKE_CASE : List[str] = upsampling_kernel_size
        SCREAMING_SNAKE_CASE : Optional[Any] = num_hash_functions
        SCREAMING_SNAKE_CASE : Tuple = num_hash_buckets
        SCREAMING_SNAKE_CASE : Tuple = local_transformer_stride
323
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


__UpperCAmelCase = {
    """config""": [
        """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
        """OnnxConfig""",
        """OnnxConfigWithPast""",
        """OnnxSeq2SeqConfigWithPast""",
        """PatchingSpec""",
    ],
    """convert""": ["""export""", """validate_model_outputs"""],
    """features""": ["""FeaturesManager"""],
    """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
323
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


lowerCamelCase_ : Tuple = {
    """configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
    """processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Any = ["""VisionTextDualEncoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : str = ["""FlaxVisionTextDualEncoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Union[str, Any] = ["""TFVisionTextDualEncoderModel"""]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
364
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


lowerCamelCase_ : Dict = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Optional[Any] = ["""SpeechEncoderDecoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Dict = ["""FlaxSpeechEncoderDecoderModel"""]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    lowerCamelCase_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
197
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : List[Any] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ["PLBartTokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", "PLBartForCausalLM", "PLBartForConditionalGeneration", "PLBartForSequenceClassification", "PLBartModel", "PLBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys _lowercase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
238
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase : List[Any] = { "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = [ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTapasPreTrainedModel", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
238
1
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def SCREAMING_SNAKE_CASE__ ( __a ):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE__ ( ):
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"

    snake_case_ : Optional[Any] = [1, 2, 3]
    with pytest.raises(__a ):
        with parallel_backend('unsupported backend' ):
            map_nested(__a , __a , num_proc=2 )

    with pytest.raises(__a ):
        with parallel_backend('unsupported backend' ):
            map_nested(__a , __a , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : Optional[Any] = {'a': 1, 'b': 2}
    snake_case_ : int = {'a': [1, 2], 'b': [3, 4]}
    snake_case_ : int = {'a': {'1': 1}, 'b': 2}
    snake_case_ : int = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    snake_case_ : Any = [2, 3]
    snake_case_ : Tuple = {'a': 2, 'b': 3}
    snake_case_ : str = {'a': [2, 3], 'b': [4, 5]}
    snake_case_ : List[str] = {'a': {'1': 2}, 'b': 3}
    snake_case_ : Union[str, Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}

    with parallel_backend('spark' ):
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
362
from decimal import Decimal, getcontext
from math import ceil, factorial


def SCREAMING_SNAKE_CASE__ ( __a ):
    if not isinstance(__a , __a ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )

    snake_case_ : Dict = precision
    snake_case_ : str = ceil(precision / 14 )
    snake_case_ : str = 42_68_80 * Decimal(1_00_05 ).sqrt()
    snake_case_ : Tuple = 1
    snake_case_ : int = 13_59_14_09
    snake_case_ : Tuple = Decimal(__a )
    for k in range(1 , __a ):
        snake_case_ : List[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(__a ) ** 3)
        linear_term += 5_45_14_01_34
        exponential_term *= -26_25_37_41_26_40_76_80_00
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
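A useful sanity check on the constants above: the k = 0 term of the Chudnovsky series alone, 426880·√10005 / 13591409, already agrees with π to roughly 13 decimal places (each further term adds about 14 digits). A standard-library-only sketch:

from decimal import Decimal, getcontext

getcontext().prec = 30
zeroth_term = 426880 * Decimal(10005).sqrt() / Decimal(13591409)
print(zeroth_term)  # 3.14159265358979... — matches pi for ~13 digits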
88
0
'''simple docstring'''
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> int:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    A__ , A__ : Dict = head.next, head
    while fast and fast.next:
        A__ : Optional[Any] = fast.next.next
        A__ : Any = slow.next
    A__ : List[Any] = slow.next
    A__ : Dict = None  # Don't forget here! But forget still works!
    # reverse the second part
    A__ : str = None
    while second:
        A__ : Optional[int] = second.next
        A__ : Optional[int] = node
        A__ : str = second
        A__ : int = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        A__ : int = node.next
        A__ : int = head.next
    return True


def __lowerCamelCase ( __snake_case : Any ) -> List[str]:
    """simple docstring"""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    A__ : List[Any] = head
    while fast and fast.next:
        A__ , A__ : Optional[Any] = fast.next.next, slow.next

    # 2. Push the second half into the stack
    A__ : str = [slow.val]
    while slow.next:
        A__ : str = slow.next
        stack.append(slow.val )

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        A__ : Optional[int] = cur.next

    return True


def __lowerCamelCase ( __snake_case : Optional[int] ) -> List[Any]:
    """simple docstring"""
    if not head or not head.next:
        return True
    A__ : Dict = {}
    A__ : List[str] = 0
    while head:
        if head.val in d:
            d[head.val].append(__snake_case )
        else:
            A__ : List[str] = [pos]
        A__ : Optional[Any] = head.next
        pos += 1
    A__ : Dict = pos - 1
    A__ : Any = 0
    for v in d.values():
        if len(__snake_case ) % 2 != 0:
            middle += 1
        else:
            A__ : Tuple = 0
            for i in range(0, len(__snake_case ) ):
                if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
134
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


__snake_case : Optional[int] = logging.get_logger(__name__)

__snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

__snake_case : Optional[Any] = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

__snake_case : Tuple = {
    'junnyu/roformer_chinese_small': 1536,
    'junnyu/roformer_chinese_base': 1536,
    'junnyu/roformer_chinese_char_small': 512,
    'junnyu/roformer_chinese_char_base': 512,
    'junnyu/roformer_small_discriminator': 128,
    'junnyu/roformer_small_generator': 128,
}

__snake_case : Optional[Any] = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}


class lowerCamelCase ( lowercase_ ):
    '''simple docstring'''

    __snake_case = VOCAB_FILES_NAMES
    __snake_case = PRETRAINED_VOCAB_FILES_MAP
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __snake_case = PRETRAINED_INIT_CONFIGURATION
    __snake_case = RoFormerTokenizer

    def __init__( self : str , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : List[Any]="[SEP]" , lowerCAmelCase_ : Union[str, Any]="[PAD]" , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Tuple , ) -> List[str]:
        '''simple docstring'''
        super().__init__(
            lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )

        A__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case
            or pre_tok_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents
        ):
            A__ : int = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) )
            A__ : Union[str, Any] = do_lower_case
            A__ : Tuple = strip_accents
            A__ : int = pre_tok_class(**lowerCAmelCase_ )

        A__ : List[Any] = do_lower_case

    def __getstate__( self : Optional[int] ) -> str:
        '''simple docstring'''
        A__ : Any = self.__dict__.copy()
        A__ : List[str] = BertPreTokenizer()
        return state

    def __setstate__( self : int , lowerCAmelCase_ : str ) -> str:
        '''simple docstring'''
        A__ : str = d
        A__ : Optional[Any] = self.__dict__["""_tokenizer"""].get_vocab()
        A__ : Any = PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase_ ) )

    def lowercase__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=None ) -> Optional[Any]:
        '''simple docstring'''
        A__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        A__ : int = [self.sep_token_id]
        A__ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        A__ : List[Any] = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
        return tuple(lowerCAmelCase_ )

    def lowercase__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : Tuple , ) -> List[Any]:
        '''simple docstring'''
        A__ : List[Any] = BertPreTokenizer()
        return super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
134
1
'''simple docstring'''
def a__ ( lowerCAmelCase__ = 1_00_00_00 ) -> int:
    UpperCAmelCase__ : Dict = set(range(3 , lowerCAmelCase__ , 2 ) )
    primes.add(2 )
    for p in range(3 , lowerCAmelCase__ , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , lowerCAmelCase__ , lowerCAmelCase__ ) ) )

    UpperCAmelCase__ : Union[str, Any] = [float(lowerCAmelCase__ ) for n in range(limit + 1 )]

    for p in primes:
        for n in range(lowerCAmelCase__ , limit + 1 , lowerCAmelCase__ ):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:] ) )


if __name__ == "__main__":
    print(F"""{solution() = }""")
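The routine above is the classic totient sieve: start from phi[n] = n and multiply by (1 - 1/p) for every prime p dividing n; the answer is then sum(phi[2:]). An integer-only sketch of the same sieve (phi_sieve is an illustrative name) that avoids the float products:

def phi_sieve(limit: int) -> list:
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:                      # p untouched so far => p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p        # exact integer form of phi[n] *= 1 - 1/p
    return phi


assert phi_sieve(10)[1:] == [1, 1, 2, 2, 4, 2, 6, 4, 6, 4]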
299
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
    for i in range(length - 1 ):
        UpperCAmelCase__ : Optional[Any] = i
        for k in range(i + 1 , lowerCAmelCase__ ):
            if collection[k] < collection[least]:
                UpperCAmelCase__ : Dict = k
        if least != i:
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
299
1
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


snake_case : str = logging.get_logger(__name__)


class snake_case_ (lowerCamelCase_ ):
    UpperCAmelCase__ : List[Any] = ['''input_features''', '''is_longer''']

    def __init__( self :Union[str, Any] ,__snake_case :Optional[Any]=64 ,__snake_case :Dict=4_80_00 ,__snake_case :List[Any]=4_80 ,__snake_case :str=10 ,__snake_case :int=10_24 ,__snake_case :List[Any]=0.0 ,__snake_case :Optional[Any]=False ,__snake_case :float = 0 ,__snake_case :float = 1_40_00 ,__snake_case :int = None ,__snake_case :str = "fusion" ,__snake_case :str = "repeatpad" ,**__snake_case :List[str] ,) -> Any:
        super().__init__(
            feature_size=__snake_case ,sampling_rate=__snake_case ,padding_value=__snake_case ,return_attention_mask=__snake_case ,**__snake_case ,)
        a__ = top_db
        a__ = truncation
        a__ = padding
        a__ = fft_window_size
        a__ = (fft_window_size >> 1) + 1
        a__ = hop_length
        a__ = max_length_s
        a__ = max_length_s * sampling_rate
        a__ = sampling_rate
        a__ = frequency_min
        a__ = frequency_max
        a__ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__snake_case ,min_frequency=__snake_case ,max_frequency=__snake_case ,sampling_rate=__snake_case ,norm=__snake_case ,mel_scale='htk' ,)
        a__ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__snake_case ,min_frequency=__snake_case ,max_frequency=__snake_case ,sampling_rate=__snake_case ,norm='slaney' ,mel_scale='slaney' ,)

    def lowerCamelCase__( self :List[Any] ) -> Dict[str, Any]:
        a__ = copy.deepcopy(self.__dict__ )
        a__ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def lowerCamelCase__( self :Any ,__snake_case :np.array ,__snake_case :Optional[np.array] = None ) -> np.ndarray:
        a__ = spectrogram(
            __snake_case ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__snake_case ,log_mel='dB' ,)
        return log_mel_spectrogram.T

    def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[int] ,__snake_case :Optional[int] ,__snake_case :Any ) -> Dict:
        a__ = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            a__ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            a__ = [0]
        # randomly choose index for each part
        a__ = np.random.choice(ranges[0] )
        a__ = np.random.choice(ranges[1] )
        a__ = np.random.choice(ranges[2] )

        a__ = mel[idx_front : idx_front + chunk_frames, :]
        a__ = mel[idx_middle : idx_middle + chunk_frames, :]
        a__ = mel[idx_back : idx_back + chunk_frames, :]

        a__ = torch.tensor(mel[None, None, :] )
        a__ = torch.nn.functional.interpolate(
            __snake_case ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=__snake_case )
        a__ = mel_shrink[0][0].numpy()
        a__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
        return mel_fusion

    def lowerCamelCase__( self :List[Any] ,__snake_case :np.array ,__snake_case :List[str] ,__snake_case :Optional[int] ,__snake_case :int ) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                a__ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                a__ = len(__snake_case ) - max_length
                a__ = np.random.randint(0 ,overflow + 1 )
                a__ = waveform[idx : idx + max_length]
                a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters )
                a__ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                a__ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    a__ = np.stack([mel, mel, mel, mel] ,axis=0 )
                    a__ = False
                else:
                    a__ = self._random_mel_fusion(__snake_case ,__snake_case ,__snake_case )
                    a__ = True
            else:
                raise NotImplementedError(F'data_truncating {truncation} not implemented' )
        else:
            a__ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    a__ = int(max_length / len(__snake_case ) )
                    a__ = np.stack(np.tile(__snake_case ,n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    a__ = int(max_length / len(__snake_case ) )
                    a__ = np.stack(np.tile(__snake_case ,__snake_case ) )
                a__ = np.pad(__snake_case ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 )

            if truncation == "fusion":
                a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters )
                a__ = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
            else:
                a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters_slaney )[None, :]

        return input_mel, longer

    def __call__( self :Dict ,__snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__snake_case :str = None ,__snake_case :Optional[str] = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,**__snake_case :Optional[Any] ,) -> BatchFeature:
        a__ = truncation if truncation is not None else self.truncation
        a__ = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        a__ = isinstance(__snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        a__ = is_batched_numpy or (
            isinstance(__snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )

        if is_batched:
            a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__snake_case ,np.ndarray ):
            a__ = np.asarray(__snake_case ,dtype=np.floataa )
        elif isinstance(__snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a__ = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            a__ = [np.asarray(__snake_case )]

        # convert to mel spectrogram, truncate and pad if needed.
        a__ = [
            self._get_input_mel(__snake_case ,max_length if max_length else self.nb_max_samples ,__snake_case ,__snake_case )
            for waveform in raw_speech
        ]

        a__ = []
        a__ = []
        for mel, longer in padded_inputs:
            input_mel.append(__snake_case )
            is_longer.append(__snake_case )

        if truncation == "fusion" and sum(__snake_case ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            a__ = np.random.randint(0 ,len(__snake_case ) )
            a__ = True

        if isinstance(input_mel[0] ,__snake_case ):
            a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for feature in input_mel]

        # is_longer is a list of bool
        a__ = [[longer] for longer in is_longer]

        a__ = {'input_features': input_mel, 'is_longer': is_longer}
        a__ = BatchFeature(__snake_case )

        if return_tensors is not None:
            a__ = input_features.convert_to_tensors(__snake_case )

        return input_features
240
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

snake_case : List[str] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('''importlib_metadata''')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")


def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=None ):
    require_version(deps[pkg] , __lowerCAmelCase )
240
1
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"

SCREAMING_SNAKE_CASE__ : Tuple = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"

SCREAMING_SNAKE_CASE__ : str = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def __A ( self : str ) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
                }
            ) , )

    def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[List[List[str]]] , SCREAMING_SNAKE_CASE__ : List[List[str]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=SCREAMING_SNAKE_CASE__ , hypotheses=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ )
        }
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase )


def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        __lowerCamelCase , __lowerCamelCase = y, x % y
    return abs(__lowerCAmelCase )


def __magic_name__ ( ) -> Tuple:
    try:
        __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        __lowerCamelCase = int(nums[0] )
        __lowerCamelCase = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_a}) = '''
            f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}'''
        )
        print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )


if __name__ == "__main__":
    main()
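A common companion to the iterative gcd above is the lcm, computed as a // gcd(a, b) * b so the division happens before the multiplication. A short sketch using the standard library's gcd (the module's own helpers carry obfuscated names, so they are not called directly here):

from math import gcd


def lcm(a: int, b: int) -> int:
    # divide first to keep the intermediate product small
    return 0 if a == 0 or b == 0 else abs(a // gcd(a, b) * b)


assert lcm(12, 18) == 36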
339
1
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)


lowercase_ = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

lowercase_ = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

lowercase_ = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

lowercase_ = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

lowercase_ = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

lowercase_ = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
    '''simple docstring'''
    if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )


def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Any:
    '''simple docstring'''
    A__ = checkpoint[f'{old_prefix}.in_layers.0.weight']
    A__ = checkpoint[f'{old_prefix}.in_layers.0.bias']
    A__ = checkpoint[f'{old_prefix}.in_layers.2.weight']
    A__ = checkpoint[f'{old_prefix}.in_layers.2.bias']

    A__ = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    A__ = checkpoint[f'{old_prefix}.emb_layers.1.bias']

    A__ = checkpoint[f'{old_prefix}.out_layers.0.weight']
    A__ = checkpoint[f'{old_prefix}.out_layers.0.bias']
    A__ = checkpoint[f'{old_prefix}.out_layers.3.weight']
    A__ = checkpoint[f'{old_prefix}.out_layers.3.bias']

    if has_skip:
        A__ = checkpoint[f'{old_prefix}.skip_connection.weight']
        A__ = checkpoint[f'{old_prefix}.skip_connection.bias']

    return new_checkpoint


def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Tuple:
    '''simple docstring'''
    A__ , A__ , A__ = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
    A__ , A__ , A__ = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )

    A__ = checkpoint[f'{old_prefix}.norm.weight']
    A__ = checkpoint[f'{old_prefix}.norm.bias']

    A__ = weight_q.squeeze(-1 ).squeeze(-1 )
    A__ = bias_q.squeeze(-1 ).squeeze(-1 )
    A__ = weight_k.squeeze(-1 ).squeeze(-1 )
    A__ = bias_k.squeeze(-1 ).squeeze(-1 )
    A__ = weight_v.squeeze(-1 ).squeeze(-1 )
    A__ = bias_v.squeeze(-1 ).squeeze(-1 )

    A__ = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    A__ = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )

    return new_checkpoint


def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
    '''simple docstring'''
    A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
    A__ = {}

    A__ = checkpoint['time_embed.0.weight']
    A__ = checkpoint['time_embed.0.bias']
    A__ = checkpoint['time_embed.2.weight']
    A__ = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        A__ = checkpoint['label_emb.weight']

    A__ = checkpoint['input_blocks.0.0.weight']
    A__ = checkpoint['input_blocks.0.0.bias']

    A__ = unet_config['down_block_types']
    A__ = unet_config['layers_per_block']
    A__ = unet_config['attention_head_dim']
    A__ = unet_config['block_out_channels']
    A__ = 1
    A__ = channels_list[0]

    for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ):
        A__ = channels_list[i]
        A__ = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(SCREAMING_SNAKE_CASE__ ):
                A__ = f'down_blocks.{i}.resnets.{j}'
                A__ = f'input_blocks.{current_layer}.0'
                A__ = True if j == 0 and downsample_block_has_skip else False
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(SCREAMING_SNAKE_CASE__ ):
                A__ = f'down_blocks.{i}.resnets.{j}'
                A__ = f'input_blocks.{current_layer}.0'
                A__ = True if j == 0 and downsample_block_has_skip else False
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
                A__ = f'down_blocks.{i}.attentions.{j}'
                A__ = f'input_blocks.{current_layer}.1'
                A__ = convert_attention(
                    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                current_layer += 1

        if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
            A__ = f'down_blocks.{i}.downsamplers.0'
            A__ = f'input_blocks.{current_layer}.0'
            A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            current_layer += 1

        A__ = current_channels

    # hardcoded the mid-block for now
    A__ = 'mid_block.resnets.0'
    A__ = 'middle_block.0'
    A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    A__ = 'mid_block.attentions.0'
    A__ = 'middle_block.1'
    A__ = convert_attention(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    A__ = 'mid_block.resnets.1'
    A__ = 'middle_block.2'
    A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    A__ = 0
    A__ = unet_config['up_block_types']

    for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                A__ = f'up_blocks.{i}.resnets.{j}'
                A__ = f'output_blocks.{current_layer}.0'
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
                current_layer += 1
            if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
                A__ = f'up_blocks.{i}.upsamplers.0'
                A__ = f'output_blocks.{current_layer-1}.1'
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                A__ = f'up_blocks.{i}.resnets.{j}'
                A__ = f'output_blocks.{current_layer}.0'
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
                A__ = f'up_blocks.{i}.attentions.{j}'
                A__ = f'output_blocks.{current_layer}.1'
                A__ = convert_attention(
                    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                current_layer += 1
            if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
                A__ = f'up_blocks.{i}.upsamplers.0'
                A__ = f'output_blocks.{current_layer-1}.2'
                A__ = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    A__ = checkpoint['out.0.weight']
    A__ = checkpoint['out.0.bias']
    A__ = checkpoint['out.2.weight']
    A__ = checkpoint['out.2.bias']

    return new_checkpoint


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    lowercase_ = parser.parse_args()

    lowercase_ = strabool(args.class_cond)

    lowercase_ = os.path.basename(args.unet_path)
    print(f"""Checkpoint: {ckpt_name}""")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        lowercase_ = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        lowercase_ = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        lowercase_ = TEST_UNET_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")

    if not args.class_cond:
        lowercase_ = None

    lowercase_ = con_pt_to_diffuser(args.unet_path, unet_config)

    lowercase_ = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        lowercase_ = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        lowercase_ = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        lowercase_ = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")

    lowercase_ = CMStochasticIterativeScheduler(**scheduler_config)

    lowercase_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
7
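A minimal sketch of the string-to-bool helper in the conversion script above; the name strabool is taken from its call site, strabool(args.class_cond), since the definition itself is obfuscated in the sample.

# Hedged sanity check of strabool's contract.
assert strabool(True) is True       # bools pass through unchanged
assert strabool("yes") is True      # "yes"/"true"/"t"/"y"/"1" -> True
assert strabool("0") is False       # "no"/"false"/"f"/"n"/"0" -> False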
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
7
1
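A minimal sketch of the save/load round trip the scheduler tests above exercise, assuming diffusers is installed; the config value is illustrative.

import tempfile

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmpdirname:
    # save_config / from_pretrained is exactly the round trip the tests check
    scheduler.save_config(tmpdirname)
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
assert reloaded.config.num_train_timesteps == 1000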
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
364
'''simple docstring'''

import os
from typing import List, Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs) -> List[str]:
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
183
0
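A tiny, self-contained sketch of load_vocab_file's contract from the tokenizer above: one stripped token per line. The file contents here are made up.

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("<cls>\n<pad>\nA \nC\n")
    path = f.name
# trailing whitespace on "A " is stripped; line order becomes token id order
assert load_vocab_file(path) == ["<cls>", "<pad>", "A", "C"]
os.unlink(path)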
"""simple docstring""" import math import random def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[Any] = False ) -> List[Any]: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value UpperCAmelCase__ = 0.02 def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> Union[str, Any]: _snake_case = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(__lowerCamelCase ): # Forward propagation _snake_case = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? _snake_case = (expected / 1_00) - layer_a # Error delta _snake_case = layer_1_error * sigmoid_function(__lowerCamelCase , __lowerCamelCase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = int(input('Expected value: ')) UpperCAmelCase__ = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
288
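A hedged note on the sample above: the deriv=True branch expects the already activated value s = sigmoid(x), because d/dx sigmoid(x) = s * (1 - s). A quick finite-difference check with an arbitrary x:

x, eps = 0.7, 1e-6
s = sigmoid_function(x)
numeric = (sigmoid_function(x + eps) - sigmoid_function(x)) / eps
assert abs(sigmoid_function(s, deriv=True) - numeric) < 1e-5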
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , 
past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
0
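An illustration of the default attention mask built by prepare_pegasus_inputs_dict in the test file above: 1 where input_ids differs from the pad token id. A pad id of 0 is assumed here purely for the demo.

import tensorflow as tf

input_ids = tf.constant([[5, 6, 0, 0]])
mask = tf.cast(tf.math.not_equal(input_ids, 0), tf.int8)
print(mask.numpy())  # [[1 1 0 0]]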
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Dict=10 , SCREAMING_SNAKE_CASE__ : Tuple=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE__ : Optional[int]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : int="relu" , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : int=None , ) -> Dict: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = embeddings_size __lowerCamelCase = hidden_sizes __lowerCamelCase = depths __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_act __lowerCamelCase = num_labels __lowerCamelCase = scope __lowerCamelCase = len(__snake_case ) def __A ( self : Dict ) -> Optional[Any]: __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def __A ( self : Tuple ) -> List[str]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> int: __lowerCamelCase = TFRegNetModel(config=__snake_case ) __lowerCamelCase = model(__snake_case , training=__snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: __lowerCamelCase = self.num_labels __lowerCamelCase = TFRegNetForImageClassification(__snake_case ) __lowerCamelCase = model(__snake_case , labels=__snake_case , training=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs __lowerCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): a__ : Dict = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ : Optional[Any] = ( 
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ : Dict = False a__ : Optional[Any] = False a__ : Dict = False a__ : List[Any] = False a__ : Dict = False def __A ( self : Optional[int] ) -> Any: __lowerCamelCase = TFRegNetModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case ) def __A ( self : Tuple ) -> int: return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __A ( self : Dict ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def __A ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __A ( self : Tuple ) -> Optional[Any]: pass def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__snake_case ) __lowerCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case ) def __A ( self : str ) -> Optional[Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def __A ( self : Tuple ) -> str: def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ): __lowerCamelCase = model_class(__snake_case ) __lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case ) __lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(__snake_case ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: __lowerCamelCase = layer_type __lowerCamelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def __A ( self : str ) -> Optional[Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple={} ): __lowerCamelCase = model(__snake_case , return_dict=__snake_case , **__snake_case ) __lowerCamelCase = model(__snake_case , return_dict=__snake_case , **__snake_case ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ): if isinstance(__snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__snake_case , __snake_case ): 
recursive_check(__snake_case , __snake_case ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__snake_case , __snake_case ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(__snake_case , __snake_case ) for model_class in self.all_model_classes: __lowerCamelCase = model_class(__snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {'''output_hidden_states''': True} ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) __lowerCamelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {'''output_hidden_states''': True} ) def __A ( self : Dict ) -> Tuple: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case ) @slow def __A ( self : Any ) -> Tuple: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = TFRegNetModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def __magic_name__ ( ) -> Any: __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __A ( self : Optional[Any] ) -> Dict: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __A ( self : List[str] ) -> Dict: __lowerCamelCase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=__snake_case , return_tensors='''tf''' ) # forward pass __lowerCamelCase = model(**__snake_case , training=__snake_case ) # verify the logits __lowerCamelCase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __snake_case ) __lowerCamelCase = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __snake_case , atol=1e-4 )
358
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , 
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
0
"""simple docstring""" import numpy as np from PIL import Image def lowercase ( a__ : Optional[Any] , a__ : str , a__ : str ) -> List[str]: _UpperCamelCase = np.array(lowerCAmelCase_ ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 # compute the shape of the output matrix _UpperCamelCase = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape _UpperCamelCase = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix _UpperCamelCase = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _UpperCamelCase = 0 _UpperCamelCase = 0 return updated_arr def lowercase ( a__ : List[str] , a__ : Dict , a__ : Dict ) -> Union[str, Any]: _UpperCamelCase = np.array(lowerCAmelCase_ ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 # compute the shape of the output matrix _UpperCamelCase = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape _UpperCamelCase = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix _UpperCamelCase = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _UpperCamelCase = 0 _UpperCamelCase = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image UpperCAmelCase = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
256
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(lowerCAmelCase_ , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = _distribute_shards(**lowerCAmelCase_ ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = _split_gen_kwargs(lowerCAmelCase_ , lowerCAmelCase_ ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if expected is RuntimeError: with pytest.raises(lowerCAmelCase_ ): _number_of_shards_in_gen_kwargs(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = _number_of_shards_in_gen_kwargs(lowerCAmelCase_ ) assert out == expected
54
0
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed, print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
206
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTree:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Sum the values of all nodes reachable from the given node."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
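A short illustrative check of the tree classes above (BinaryTree is a reconstructed name for the masked class):

# Build a three-node tree and sum it via the depth-first search above.
root = Node(10)
root.left = Node(5)
root.right = Node(-3)
print(next(iter(BinaryTree(root))))  # 12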
206
1
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
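Illustrative checks of the helpers above; 5777 is the well-known smallest counterexample to Goldbach's other conjecture:

print(is_prime(97))     # True
print(is_prime(5777))   # False (5777 = 53 * 109)
print(compute_nums(1))  # [5777], the smallest odd composite with no prime + 2*i*i form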
70
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A__ : List[Any] =logging.get_logger(__name__) A__ : Any =torch.device('''cpu''') def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return im def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for k in state_dict.keys(): _lowerCAmelCase = k if ".pwconv" in k: _lowerCAmelCase = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _lowerCAmelCase = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _lowerCAmelCase = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _lowerCAmelCase = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _lowerCAmelCase = k_new.split(""".""" ) if ls[2].isdigit(): _lowerCAmelCase = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _lowerCAmelCase = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _lowerCAmelCase = 10_00 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _lowerCAmelCase = [3, 3, 6, 4] _lowerCAmelCase = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": _lowerCAmelCase = [3, 3, 9, 6] _lowerCAmelCase = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": _lowerCAmelCase = [4, 3, 10, 5] _lowerCAmelCase = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": _lowerCAmelCase = [4, 4, 12, 6] _lowerCAmelCase = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _lowerCAmelCase = 
torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , check_hash=lowerCAmelCase ) else: _lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" ) _lowerCAmelCase = checkpoint _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # load HuggingFace model _lowerCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase ).eval() hf_model.load_state_dict(lowerCAmelCase ) # prepare test inputs _lowerCAmelCase = prepare_img() _lowerCAmelCase = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""pt""" ) # compare outputs from both models _lowerCAmelCase = get_expected_output(lowerCAmelCase ) _lowerCAmelCase = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase , atol=1e-3 ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": A__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') A__ : Tuple =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
70
1
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f'{solution() = }')
351
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __snake_case = threading.Lock() __snake_case = None __snake_case = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } __snake_case = logging.WARNING __snake_case = True def lowerCAmelCase_ ( )-> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] =os.getenv('''TRANSFORMERS_VERBOSITY''' , __lowerCAmelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowerCAmelCase_ ( )-> str: '''simple docstring''' return __name__.split('''.''' )[0] def lowerCAmelCase_ ( )-> logging.Logger: '''simple docstring''' return logging.getLogger(_get_library_name() ) def lowerCAmelCase_ ( )-> None: '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return UpperCAmelCase : Union[str, Any] =logging.StreamHandler() # Set sys.stderr as stream. UpperCAmelCase : str =sys.stderr.flush # Apply our default configuration to the library root logger. UpperCAmelCase : List[Any] =_get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) UpperCAmelCase : Optional[int] =False def lowerCAmelCase_ ( )-> None: '''simple docstring''' global _default_handler with _lock: if not _default_handler: return UpperCAmelCase : str =_get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) UpperCAmelCase : Optional[Any] =None def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return log_levels def lowerCAmelCase_ ( __lowerCAmelCase = None )-> logging.Logger: '''simple docstring''' if name is None: UpperCAmelCase : int =_get_library_name() _configure_library_root_logger() return logging.getLogger(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> int: '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> Optional[int]: '''simple docstring''' return set_verbosity(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return set_verbosity(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> Any: '''simple docstring''' return set_verbosity(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> Dict: '''simple docstring''' return set_verbosity(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def lowerCAmelCase_ ( )-> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' _configure_library_root_logger() assert 
handler is not None _get_library_root_logger().addHandler(__lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> None: '''simple docstring''' _configure_library_root_logger() UpperCAmelCase : int =False def lowerCAmelCase_ ( )-> None: '''simple docstring''' _configure_library_root_logger() UpperCAmelCase : Tuple =True def lowerCAmelCase_ ( )-> None: '''simple docstring''' UpperCAmelCase : List[Any] =_get_library_root_logger().handlers for handler in handlers: UpperCAmelCase : str =logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' ) handler.setFormatter(__lowerCAmelCase ) def lowerCAmelCase_ ( )-> None: '''simple docstring''' UpperCAmelCase : int =_get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__lowerCAmelCase ) def lowerCAmelCase_ ( self , *__lowerCAmelCase , **__lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] =os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , __lowerCAmelCase ) if no_advisory_warnings: return self.warning(*__lowerCAmelCase , **__lowerCAmelCase ) __snake_case = warning_advice @functools.lru_cache(__lowerCAmelCase ) def lowerCAmelCase_ ( self , *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]: '''simple docstring''' self.warning(*__lowerCAmelCase , **__lowerCAmelCase ) __snake_case = warning_once class __snake_case : def __init__( self , *snake_case__ , **snake_case__ ) -> Dict: # pylint: disable=unused-argument '''simple docstring''' UpperCAmelCase : Any =args[0] if args else None def __iter__( self ) -> List[Any]: '''simple docstring''' return iter(self._iterator ) def __getattr__( self , snake_case__ ) -> str: '''simple docstring''' def empty_fn(*snake_case__ , **snake_case__ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> int: '''simple docstring''' return self def __exit__( self , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' return class __snake_case : def __call__( self , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*snake_case__ , **snake_case__ ) else: return EmptyTqdm(*snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' UpperCAmelCase : Union[str, Any] =None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() __snake_case = _tqdm_cls() def lowerCAmelCase_ ( )-> bool: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def lowerCAmelCase_ ( )-> Optional[Any]: '''simple docstring''' global _tqdm_active UpperCAmelCase : Dict =True hf_hub_utils.enable_progress_bars() def lowerCAmelCase_ ( )-> Optional[Any]: '''simple docstring''' global _tqdm_active UpperCAmelCase : List[str] =False hf_hub_utils.disable_progress_bars()
78
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A =logging.get_logger(__name__) __A ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __A =[ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for attribute in key.split("." ): lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value elif weight_type == "running_mean": lowerCamelCase_ = value elif weight_type == "running_var": lowerCamelCase_ = value elif weight_type == "num_batches_tracked": lowerCamelCase_ = value elif weight_type == "inv_freq": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(F'{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = [] lowerCamelCase_ = fairseq_model.state_dict() lowerCamelCase_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase_ = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCamelCase_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2] lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ ) if "pos_bias_u" in name: lowerCamelCase_ = None elif "pos_bias_v" in name: lowerCamelCase_ = None elif "weight_g" in name: lowerCamelCase_ = "weight_g" elif "weight_v" in name: lowerCamelCase_ = "weight_v" elif "bias" in name: lowerCamelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase_ = "weight" elif "running_mean" in name: lowerCamelCase_ = "running_mean" elif "inv_freq" in name: lowerCamelCase_ = "inv_freq" elif "running_var" in name: lowerCamelCase_ = "running_var" elif "num_batches_tracked" in name: lowerCamelCase_ = "num_batches_tracked" else: lowerCamelCase_ = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'Unused weights: {unused_weights}' ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = full_name.split("conv_layers." )[-1] lowerCamelCase_ = name.split("." ) lowerCamelCase_ = int(items[0] ) lowerCamelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' 
) lowerCamelCase_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowerCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ): if config_path is not None: lowerCamelCase_ = WavaVecaConformerConfig.from_pretrained(lowerCamelCase__ , hidden_act="swish" ) else: lowerCamelCase_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: lowerCamelCase_ = "rotary" if is_finetuned: if dict_path: lowerCamelCase_ = Dictionary.load(lowerCamelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase_ = target_dict.pad_index lowerCamelCase_ = target_dict.bos_index lowerCamelCase_ = target_dict.eos_index lowerCamelCase_ = len(target_dict.symbols ) lowerCamelCase_ = os.path.join(lowerCamelCase__ , "vocab.json" ) if not os.path.isdir(lowerCamelCase__ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase__ ) ) return os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) lowerCamelCase_ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase_ = 0 lowerCamelCase_ = 1 with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ = WavaVecaCTCTokenizer( lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase__ , ) lowerCamelCase_ = True if config.feat_extract_norm == "layer" else False lowerCamelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) lowerCamelCase_ = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) lowerCamelCase_ = WavaVecaConformerForCTC(lowerCamelCase__ ) else: lowerCamelCase_ = WavaVecaConformerForPreTraining(lowerCamelCase__ ) if is_finetuned: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: lowerCamelCase_ = argparse.Namespace(task="audio_pretraining" ) lowerCamelCase_ = fairseq.tasks.setup_task(lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ ) lowerCamelCase_ = model[0].eval() recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": __A =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __A =parser.parse_args() 
convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
19
from collections import deque


def tarjan(g):
    """Tarjan's algorithm: find the strongly connected components of a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
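A small illustrative run of the reconstructed tarjan() above on a 3-node cycle with one extra sink vertex:

# Vertices 0, 1, 2 form a cycle (one component); vertex 3 is a sink (its own component).
g = create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
print(tarjan(g))  # [[3], [2, 1, 0]]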
19
1
import argparse from collections import defaultdict import yaml a : List[Any] = 'docs/source/en/_toctree.yml' def lowerCAmelCase_ (lowerCAmelCase__: str ): """simple docstring""" UpperCAmelCase_: Optional[int] = defaultdict(lowerCAmelCase__ ) UpperCAmelCase_: Tuple = [] UpperCAmelCase_: List[Any] = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(lowerCAmelCase__ ) UpperCAmelCase_: Tuple = new_doc_list UpperCAmelCase_: Tuple = [key for key, value in counts.items() if value > 1] UpperCAmelCase_: Tuple = [] for duplicate_key in duplicates: UpperCAmelCase_: Tuple = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(lowerCAmelCase__ ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) UpperCAmelCase_: Optional[int] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(lowerCAmelCase__ ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(lowerCAmelCase__ ) # Sort return overview_doc def lowerCAmelCase_ (lowerCAmelCase__: int=False ): """simple docstring""" with open(lowerCAmelCase__ , encoding="""utf-8""" ) as f: UpperCAmelCase_: List[str] = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase_: List[str] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase_: Any = content[api_idx]["""sections"""] # Then to the model doc UpperCAmelCase_: int = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCAmelCase_: Optional[Any] = api_doc[scheduler_idx]["""sections"""] UpperCAmelCase_: Any = clean_doc_toc(lowerCAmelCase__ ) UpperCAmelCase_: str = False if new_scheduler_doc != scheduler_doc: UpperCAmelCase_: str = True if overwrite: UpperCAmelCase_: Tuple = new_scheduler_doc if diff: if overwrite: UpperCAmelCase_: Union[str, Any] = api_doc with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def lowerCAmelCase_ (lowerCAmelCase__: int=False ): """simple docstring""" with open(lowerCAmelCase__ , encoding="""utf-8""" ) as f: UpperCAmelCase_: List[str] = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase_: Tuple = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase_: Dict = content[api_idx]["""sections"""] # Then to the model doc UpperCAmelCase_: Optional[Any] = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCAmelCase_: List[Any] = False UpperCAmelCase_: Union[str, Any] = api_doc[pipeline_idx]["""sections"""] UpperCAmelCase_: str = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCAmelCase_: Dict = pipeline_doc["""section"""] UpperCAmelCase_: Optional[int] = clean_doc_toc(lowerCAmelCase__ ) 
if overwrite: UpperCAmelCase_: Dict = new_sub_pipeline_doc new_pipeline_docs.append(lowerCAmelCase__ ) # sort overall pipeline doc UpperCAmelCase_: int = clean_doc_toc(lowerCAmelCase__ ) if new_pipeline_docs != pipeline_docs: UpperCAmelCase_: Any = True if overwrite: UpperCAmelCase_: Any = new_pipeline_docs if diff: if overwrite: UpperCAmelCase_: Union[str, Any] = api_doc with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') a : Tuple = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
350
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
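A deterministic sketch of the generator above; seed=0 is an illustrative choice, not part of the original demo:

lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0)
print(lcg.next_number())  # 1013904223, i.e. (1664525 * 0 + 1013904223) % (2 << 31)
print(lcg.next_number())  # (1664525 * 1013904223 + 1013904223) % (2 << 31)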
82
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowercase_ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
303
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class UpperCamelCase : def __init__( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = data snake_case_ = None class UpperCamelCase : def __init__( self) -> Dict: snake_case_ = None snake_case_ = None def __iter__( self) -> Iterator[Any]: snake_case_ = self.head while self.head: yield node.data snake_case_ = node.next if node == self.head: break def __len__( self) -> int: return sum(1 for _ in self) def __repr__( self) -> str: return "->".join(str(lowerCAmelCase__) for item in iter(self)) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(len(self), lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(0, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None: if index < 0 or index > len(self): raise IndexError('list index out of range.') snake_case_ = Node(lowerCAmelCase__) if self.head is None: snake_case_ = new_node # first node points itself snake_case_ = snake_case_ = new_node elif index == 0: # insert at head snake_case_ = self.head snake_case_ = snake_case_ = new_node else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = new_node if index == len(self) - 1: # insert at tail snake_case_ = new_node def a_ ( self) -> str: return self.delete_nth(0) def a_ ( self) -> Any: return self.delete_nth(len(self) - 1) def a_ ( self, lowerCAmelCase__ = 0) -> Any: if not 0 <= index < len(self): raise IndexError('list index out of range.') snake_case_ = self.head if self.head == self.tail: # just one node snake_case_ = snake_case_ = None elif index == 0: # delete head node snake_case_ = self.tail.next.next snake_case_ = self.head.next else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = temp.next.next if index == len(self) - 1: # delete at tail snake_case_ = temp return delete_node.data def a_ ( self) -> bool: return len(self) == 0 def UpperCAmelCase ( ) -> None: snake_case_ = CircularLinkedList() assert len(UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(UpperCAmelCase ) == i circular_linked_list.insert_nth(UpperCAmelCase , i + 1 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) 
assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
69
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __lowerCAmelCase (): __lowerCAmelCase : List[Any] = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=_UpperCamelCase ) __lowerCAmelCase : Optional[int] = parser.add_subparsers(help='accelerate command helpers' ) # Register commands get_config_parser(subparsers=_UpperCamelCase ) env_command_parser(subparsers=_UpperCamelCase ) launch_command_parser(subparsers=_UpperCamelCase ) tpu_command_parser(subparsers=_UpperCamelCase ) test_command_parser(subparsers=_UpperCamelCase ) # Let's go __lowerCAmelCase : Any = parser.parse_args() if not hasattr(_UpperCamelCase , 'func' ): parser.print_help() exit(1 ) # Run args.func(_UpperCamelCase ) if __name__ == "__main__": main()
182
"""simple docstring""" import qiskit def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register __lowerCAmelCase : str = qiskit.QuantumCircuit(_UpperCamelCase , _UpperCamelCase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator __lowerCAmelCase : Optional[int] = qiskit.execute(_UpperCamelCase , _UpperCamelCase , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_UpperCamelCase ) if __name__ == "__main__": print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
182
1
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __a :str = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False) parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not') parser.add_argument('--steps', default=None, type=int, help='Num inference steps') __a :Any = parser.parse_args() __a :Dict = 'cpu' __a :Dict = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings' __a :Optional[Any] = 'path-to-your-trained-model' __a :Dict = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __a :List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __a :Optional[int] = pipe.to(device) # to channels last __a :Any = pipe.unet.to(memory_format=torch.channels_last) __a :Any = pipe.vae.to(memory_format=torch.channels_last) __a :Any = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __a :Any = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __a :str = torch.randn(2, 4, 64, 64) __a :Optional[int] = torch.rand(1) * 999 __a :Optional[int] = torch.randn(2, 77, 768) __a :int = (sample, timestep, encoder_hidden_status) try: __a :Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __a :List[str] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __a :List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __a :List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __a :Optional[int] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __a :List[str] = 666 __a :List[Any] = torch.Generator(device).manual_seed(seed) __a :int = {'generator': generator} if args.steps is not None: __a :Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __a :Optional[Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save('generated.png')
312
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check that the data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
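A round-trip sketch of the base16 helpers above (the base16_encode/base16_decode names are reconstructions of the masked originals):

encoded = base16_encode(b"Hello")
print(encoded)                 # 48656C6C6F
print(base16_decode(encoded))  # b'Hello'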
312
1
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) A : str = pytest.mark.integration @pytest.mark.parametrize('path' ,['paws', 'csv'] ) def lowerCAmelCase__ ( lowerCamelCase : str ,lowerCamelCase : List[str] ): inspect_dataset(lowerCamelCase ,lowerCamelCase ) _A : Union[str, Any] = path + '.py' assert script_name in os.listdir(lowerCamelCase ) assert "__pycache__" not in os.listdir(lowerCamelCase ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' ,['accuracy'] ) def lowerCAmelCase__ ( lowerCamelCase : Dict ,lowerCamelCase : Optional[int] ): inspect_metric(lowerCamelCase ,lowerCamelCase ) _A : Union[str, Any] = path + '.py' assert script_name in os.listdir(lowerCamelCase ) assert "__pycache__" not in os.listdir(lowerCamelCase ) @pytest.mark.parametrize( 'path, config_name, expected_splits' ,[ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : str ): _A : Dict = get_dataset_config_info(lowerCamelCase ,config_name=lowerCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' ,[ ('paws', None, ValueError), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ): with pytest.raises(lowerCamelCase ): get_dataset_config_info(lowerCamelCase ,config_name=lowerCamelCase ) @pytest.mark.parametrize( 'path, expected' ,[ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ): _A : Any = get_dataset_config_names(lowerCamelCase ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' ,[ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[str] ,lowerCamelCase : int ): _A : Union[str, Any] = get_dataset_infos(lowerCamelCase ) assert list(infos.keys() ) == expected_configs _A : Optional[int] = expected_configs[0] assert expected_config in infos _A : Optional[int] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' ,[ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ): _A : int = get_dataset_infos(lowerCamelCase ) assert expected_config in infos _A : List[str] = infos[expected_config] assert info.config_name == expected_config assert 
list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' ,[ ('paws', None, ValueError), ] ,) def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): with pytest.raises(lowerCamelCase ): get_dataset_split_names(lowerCamelCase ,config_name=lowerCamelCase )
227
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str=0.999 ,lowerCamelCase : int="cosine" ,): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _A : Tuple = [] for i in range(lowerCamelCase ): _A : List[Any] = i / num_diffusion_timesteps _A : List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) ,lowerCamelCase ) ) return torch.tensor(lowerCamelCase ,dtype=torch.floataa ) class __lowerCamelCase ( a_ , a_ ): """simple docstring""" a = [e.name for e in KarrasDiffusionSchedulers] a = 2 @register_to_config def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ): if trained_betas is not None: _A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "linear": _A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _A : Any = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}') _A : Any = 1.0 - self.betas _A : List[Any] = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None): if schedule_timesteps is None: _A : Dict = self.timesteps _A : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: _A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0 else: _A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep _A : int = self._index_counter[timestep_int] return indices[pos].item() @property def A ( self : Optional[Any]): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ): _A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE) if self.state_in_first_order: _A : Any = self.sigmas[step_index] else: _A : int = self.sigmas_interpol[step_index] _A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ): _A : Optional[Any] = num_inference_steps _A : int = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy() elif self.config.timestep_spacing == "leading": _A : Optional[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _A : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.') _A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) _A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE) _A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa) _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE) # interpolate sigmas _A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() _A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) _A : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(SCREAMING_SNAKE_CASE).startswith('mps'): # mps does not support float64 _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa) else: _A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) # interpolate timesteps _A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype) _A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() _A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps]) _A : str = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE) def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]): # get log sigma _A : Dict = sigma.log() # get distribution _A : Any = log_sigma - self.log_sigmas[:, None] # get sigmas range _A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) _A : Union[str, Any] = low_idx + 1 _A : Dict = self.log_sigmas[low_idx] _A : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas _A : Dict = (low - log_sigma) / (low - high) _A : Union[str, Any] = w.clamp(0 , 1) # transform interpolation to time range _A : int = (1 - w) * low_idx + w * high_idx _A : Any = t.view(sigma.shape) return t @property def A ( self : Any): return self.sample is None def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ): _A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE) # advance index counter by 1 _A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _A : Tuple = self.sigmas[step_index] _A : Dict = self.sigmas_interpol[step_index + 1] _A : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _A : int = self.sigmas[step_index - 1] _A : Union[str, Any] = self.sigmas_interpol[step_index] _A : Dict = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _A : List[Any] = 0 _A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol _A : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _A : Any = sigma_hat if self.state_in_first_order else sigma_interpol _A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _A : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _A : str = sigma_interpol - sigma_hat # store for 2nd order step _A : List[str] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _A : List[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _A : Optional[int] = sigma_next - sigma_hat _A : str = self.sample _A : Any = None _A : Tuple = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE) def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples _A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE): # mps does not support float64 _A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa) _A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa) else: _A : str = self.timesteps.to(original_samples.device) _A : str = timesteps.to(original_samples.device) _A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps] _A : Tuple = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): _A : List[Any] = sigma.unsqueeze(-1) _A : Dict = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any]): return self.config.num_train_timesteps
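# Illustrative driver loop for the two-phase (KDPM2-style) scheduler above
# (added sketch, not code from the original file; it uses the standard
# diffusers scheduler API names, and `unet` is a placeholder model):
# scheduler.set_timesteps(num_inference_steps=25, device="cuda")
# sample = torch.randn(1, 4, 64, 64, device="cuda") * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample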
227
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ['MobileViTFeatureExtractor'] snake_case_ = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
24
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __lowerCAmelCase : Dict =version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :tuple , lowerCAmelCase__ :Path , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=False , ) -> Union[str, Any]: '''simple docstring''' output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) else: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) @torch.no_grad() def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> str: '''simple docstring''' lowercase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowercase = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: lowercase = """cpu""" lowercase = Path(lowerCAmelCase__ ) # VAE DECODER lowercase = AutoencoderKL.from_pretrained(model_path + """/vae""" ) lowercase = vae_decoder.config.latent_channels # forward only through the decoder part lowercase = vae_decoder.decode onnx_export( lowerCAmelCase__ , model_args=( torch.randn(1 , lowerCAmelCase__ , 2_5 , 2_5 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowerCAmelCase__ , ) del vae_decoder if __name__ == "__main__": __lowerCAmelCase : Tuple =argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=1_4, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") __lowerCAmelCase : Dict =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("""SD: Done: ONNX""")
197
0
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    # Yield the Fibonacci sequence 1, 2, 3, 5, 8, ... indefinitely.
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # Return the index of the first Fibonacci term that has n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
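# Illustrative check of solution() above (added; not part of the original
# file): the first Fibonacci term with three digits is F(12) = 144, so
# solution(3) returns 12. Project Euler problem 25 asks for n = 1000.
# >>> solution(3)
# 12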
36
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
36
1
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowercase ( _UpperCAmelCase ): def _snake_case ( self ) -> List[Any]: lowerCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase , """width_multiplier""" ) ) class lowercase : def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=2 , lowercase=3 , lowercase="swish" , lowercase=3 , lowercase=32 , lowercase=0.1 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=10 , lowercase=None , lowercase=0.25 , lowercase=0.0 , lowercase=0.0 , ) -> List[str]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = make_divisible(512 * width_multiplier , divisor=8 ) lowerCAmelCase = hidden_act lowerCAmelCase = conv_kernel_size lowerCAmelCase = output_stride lowerCAmelCase = classifier_dropout_prob lowerCAmelCase = use_labels lowerCAmelCase = is_training lowerCAmelCase = num_labels lowerCAmelCase = initializer_range lowerCAmelCase = scope lowerCAmelCase = width_multiplier lowerCAmelCase = ffn_dropout lowerCAmelCase = attn_dropout def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def _snake_case ( self ) -> List[str]: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: lowerCAmelCase = MobileViTVaModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]: lowerCAmelCase = self.num_labels lowerCAmelCase = MobileViTVaForImageClassification(lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase 
, labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: lowerCAmelCase = self.num_labels lowerCAmelCase = MobileViTVaForSemanticSegmentation(lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase = model(lowercase , labels=lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _snake_case ( self ) -> str: lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def _snake_case ( self ) -> Dict: lowerCAmelCase = MobileViTVaModelTester(self ) lowerCAmelCase = MobileViTVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase ) def _snake_case ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def _snake_case ( self ) -> List[str]: pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def _snake_case ( self ) -> Optional[Any]: pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def _snake_case ( self ) -> Any: pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def _snake_case ( self ) -> str: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _snake_case ( self ) -> List[Any]: pass def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(lowercase ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase ) def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def _snake_case ( self ) -> List[str]: def check_hidden_states_output(lowercase , lowercase , lowercase ): lowerCAmelCase = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) ) lowerCAmelCase = outputs.hidden_states lowerCAmelCase = 5 self.assertEqual(len(lowercase ) , lowercase ) # MobileViTV2's 
feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCAmelCase = 2 for i in range(len(lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True check_hidden_states_output(lowercase , lowercase , lowercase ) def _snake_case ( self ) -> str: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) def _snake_case ( self ) -> Dict: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowercase ) @slow def _snake_case ( self ) -> Union[str, Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = MobileViTVaModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> List[str]: return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def _snake_case ( self ) -> List[Any]: lowerCAmelCase = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( lowercase ) lowerCAmelCase = self.default_image_processor lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**lowercase ) # verify the logits lowerCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowercase ) lowerCAmelCase = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) ) @slow def _snake_case ( self ) -> Dict: lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCAmelCase = model.to(lowercase ) lowerCAmelCase = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**lowercase ) lowerCAmelCase = outputs.logits # verify the logits lowerCAmelCase = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , lowercase ) lowerCAmelCase = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1e-4 ) ) @slow def _snake_case ( self 
) -> Dict: lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCAmelCase = model.to(lowercase ) lowerCAmelCase = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**lowercase ) lowerCAmelCase = outputs.logits.detach().cpu() lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(50, 60)] ) lowerCAmelCase = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , lowercase ) lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowercase ) lowerCAmelCase = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , lowercase )
46
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
215
0
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
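# Illustrative use of the deprecation shim above (added sketch, not part of
# the original file): instantiating the legacy class still works but emits a
# FutureWarning pointing at the replacement.
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     BeitFeatureExtractor()
#     assert issubclass(caught[-1].category, FutureWarning)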
364
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCAmelCase : Dict = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Union[str, Any] = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2]
        # to this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a collision in the vertical direction. Then we apply the two
        # formulas we learned before:
        #
        # 45º:  y - x = b   or   row - col = b
        # 135º: y + x = b   or   row + col = b
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks pass we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
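# Worked example for the collision bookkeeping above (added; not from the
# original file): on a 4x4 board the search finds exactly two solutions,
# [1, 3, 0, 2] and [2, 0, 3, 1]. For [1, 3, 0, 2] the row - col values are
# -1, -2, 2, 1 and the row + col values are 1, 4, 2, 5 -- all distinct, so
# no two queens share a diagonal.
# boards: list[list[str]] = []
# depth_first_search([], [], [], boards, 4)
# assert len(boards) == 2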
28
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ : List[str] = { '''configuration_speecht5''': [ '''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''', '''SpeechT5Config''', '''SpeechT5HifiGanConfig''', ], '''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''], '''processing_speecht5''': ['''SpeechT5Processor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''SpeechT5Tokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = [ '''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SpeechT5ForSpeechToText''', '''SpeechT5ForSpeechToSpeech''', '''SpeechT5ForTextToSpeech''', '''SpeechT5Model''', '''SpeechT5PreTrainedModel''', '''SpeechT5HifiGan''', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
143
0
'''simple docstring''' # Imports import numpy as np class a_ : def __init__( self : Tuple , lowercase : Optional[int]=None , lowercase : Union[str, Any]=None , lowercase : Dict=None , lowercase : Dict=None , lowercase : Optional[int]=None ): """simple docstring""" self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase ) def lowercase__ ( self : Dict , lowercase : Optional[Any]=None , lowercase : Dict=None , lowercase : List[Any]=None , lowercase : str=None , lowercase : Union[str, Any]=None ): """simple docstring""" if red is not None: lowercase_ :Dict = red if green is not None: lowercase_ :Union[str, Any] = green if blue is not None: lowercase_ :Union[str, Any] = blue if red_edge is not None: lowercase_ :Any = red_edge if nir is not None: lowercase_ :str = nir return True def lowercase__ ( self : Tuple , lowercase : str="" , lowercase : Union[str, Any]=None , lowercase : Union[str, Any]=None , lowercase : Any=None , lowercase : Union[str, Any]=None , lowercase : Optional[Any]=None ): """simple docstring""" self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase ) lowercase_ :Optional[Any] = { "ARVI2": self.arvaa, "CCCI": self.ccci, "CVI": self.cvi, "GLI": self.gli, "NDVI": self.ndvi, "BNDVI": self.bndvi, "redEdgeNDVI": self.red_edge_ndvi, "GNDVI": self.gndvi, "GBNDVI": self.gbndvi, "GRNDVI": self.grndvi, "RBNDVI": self.rbndvi, "PNDVI": self.pndvi, "ATSAVI": self.atsavi, "BWDRVI": self.bwdrvi, "CIgreen": self.ci_green, "CIrededge": self.ci_rededge, "CI": self.ci, "CTVI": self.ctvi, "GDVI": self.gdvi, "EVI": self.evi, "GEMI": self.gemi, "GOSAVI": self.gosavi, "GSAVI": self.gsavi, "Hue": self.hue, "IVI": self.ivi, "IPVI": self.ipvi, "I": self.i, "RVI": self.rvi, "MRVI": self.mrvi, "MSAVI": self.m_savi, "NormG": self.norm_g, "NormNIR": self.norm_nir, "NormR": self.norm_r, "NGRDI": self.ngrdi, "RI": self.ri, "S": self.s, "IF": self._if, "DVI": self.dvi, "TVI": self.tvi, "NDRE": self.ndre, } try: return funcs[index]() except KeyError: print("Index not in the list!" 
) return False def lowercase__ ( self : Dict ): """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def lowercase__ ( self : int ): """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def lowercase__ ( self : List[Any] ): """simple docstring""" return self.nir * (self.red / (self.green**2)) def lowercase__ ( self : Optional[int] ): """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def lowercase__ ( self : List[str] ): """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def lowercase__ ( self : Tuple ): """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def lowercase__ ( self : List[Any] ): """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def lowercase__ ( self : int ): """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def lowercase__ ( self : Optional[int] ): """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def lowercase__ ( self : Any ): """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def lowercase__ ( self : Any , lowercase : List[Any]=0.08 , lowercase : Tuple=1.22 , lowercase : Optional[int]=0.03 ): """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def lowercase__ ( self : List[str] ): """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def lowercase__ ( self : Optional[int] ): """simple docstring""" return (self.nir / self.green) - 1 def lowercase__ ( self : str ): """simple docstring""" return (self.nir / self.redEdge) - 1 def lowercase__ ( self : Any ): """simple docstring""" return (self.red - self.blue) / self.red def lowercase__ ( self : str ): """simple docstring""" lowercase_ :Any = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def lowercase__ ( self : str ): """simple docstring""" return self.nir - self.green def lowercase__ ( self : Optional[int] ): """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :List[Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def lowercase__ ( self : List[Any] , lowercase : Union[str, Any]=0.16 ): """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def lowercase__ ( self : Optional[int] , lowercase : Optional[Any]=0.5 ): """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def lowercase__ ( self : str ): """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def lowercase__ ( self : List[str] , lowercase : Union[str, Any]=None , lowercase : str=None ): """simple docstring""" return 
(self.nir - b) / (a * self.red) def lowercase__ ( self : str ): """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def lowercase__ ( self : Any ): """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def lowercase__ ( self : Optional[Any] ): """simple docstring""" return self.nir / self.red def lowercase__ ( self : Optional[int] ): """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def lowercase__ ( self : Tuple ): """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def lowercase__ ( self : str ): """simple docstring""" return self.green / (self.nir + self.red + self.green) def lowercase__ ( self : Optional[int] ): """simple docstring""" return self.nir / (self.nir + self.red + self.green) def lowercase__ ( self : Tuple ): """simple docstring""" return self.red / (self.nir + self.red + self.green) def lowercase__ ( self : Optional[int] ): """simple docstring""" return (self.green - self.red) / (self.green + self.red) def lowercase__ ( self : str ): """simple docstring""" return (self.red - self.green) / (self.red + self.green) def lowercase__ ( self : int ): """simple docstring""" lowercase_ :Dict = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowercase_ :Optional[int] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def lowercase__ ( self : List[str] ): """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def lowercase__ ( self : str ): """simple docstring""" return self.nir / self.red def lowercase__ ( self : Dict ): """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def lowercase__ ( self : List[Any] ): """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
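# Worked check of the NDVI formula defined above (added; not from the
# original file): with nir = 0.5 and red = 0.1,
# NDVI = (0.5 - 0.1) / (0.5 + 0.1) ≈ 0.667, a value typical of dense,
# healthy vegetation (NDVI ranges from -1 to 1).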
147
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) def UpperCAmelCase_ ( __lowerCamelCase : Union[tf.Tensor, np.ndarray] ): if isinstance(__lowerCamelCase ,np.ndarray ): return list(tensor.shape ) lowercase_ :Optional[int] = tf.shape(__lowerCamelCase ) if tensor.shape == tf.TensorShape(__lowerCamelCase ): return dynamic lowercase_ :Union[str, Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCamelCase )] def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[str] = None ): return tf.nn.softmax(logits=logits + 1e-9 ,axis=__lowerCamelCase ,name=__lowerCamelCase ) def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : List[str]=1e-5 ,__lowerCamelCase : List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCamelCase ,__lowerCamelCase ): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ :List[str] = tf.nn.moments(__lowerCamelCase ,axes=[axis] ,keepdims=__lowerCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ :Union[str, Any] = [1] * inputs.shape.rank lowercase_ :Optional[Any] = shape_list(__lowerCamelCase )[axis] lowercase_ :List[str] = tf.reshape(__lowerCamelCase ,__lowerCamelCase ) lowercase_ :Dict = tf.reshape(__lowerCamelCase ,__lowerCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase_ :Union[str, Any] = tf.nn.batch_normalization( __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,offset=__lowerCamelCase ,scale=__lowerCamelCase ,variance_epsilon=__lowerCamelCase ,) return outputs def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Union[str, Any]=0 ,__lowerCamelCase : Dict=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ :Optional[int] = tf.shape(__lowerCamelCase ) lowercase_ :Optional[int] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ :List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 ) return tf.reshape(__lowerCamelCase ,__lowerCamelCase ) def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ): if not isinstance(__lowerCamelCase ,tf.Tensor ): lowercase_ :str = tf.convert_to_tensor(__lowerCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ :List[Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ :Optional[int] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ :str = ( tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : int ,__lowerCamelCase : str = "input_ids" ): tf.debugging.assert_less( __lowerCamelCase ,tf.cast(__lowerCamelCase ,dtype=tensor.dtype ) ,message=( F'The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCamelCase )}) must be smaller than the embedding ' F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.' ) ,) def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ): lowercase_ :int = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ :Union[str, Any] = [x for x in data if len(__lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} ' F'bytes: {bad_attributes}' ) lowercase_ :Union[str, Any] = np.asarray(__lowerCamelCase ) lowercase_ :Optional[int] = 1 lowercase_ :int = np.array_split(__lowerCamelCase ,__lowerCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ :List[Any] = np.array_split(__lowerCamelCase ,__lowerCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__lowerCamelCase ): lowercase_ :int = chunk_data else: lowercase_ :Tuple = data def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Tuple ): if name in group.attrs: lowercase_ :Optional[Any] = [n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs[name]] else: lowercase_ :List[str] = [] lowercase_ :str = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def UpperCAmelCase_ ( __lowerCamelCase : str ): def _expand_single_ad_tensor(__lowerCamelCase : Tuple ): if isinstance(__lowerCamelCase ,tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__lowerCamelCase ,axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor ,__lowerCamelCase )
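# Illustrative behaviour of the helpers above (added sketch; `shape_list`
# and `flatten` refer to the first and fourth functions, which mirror
# dynamic-shape utilities and torch.flatten semantics respectively):
# shape_list(tf.zeros((2, 3)))        # -> [2, 3] (static dims as plain ints)
# flatten(tf.zeros((2, 3, 4)), 1)     # -> tensor of shape (2, 12)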
147
1
import datasets from .evaluate import evaluate UpperCamelCase__ = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ UpperCamelCase__ = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ UpperCamelCase__ = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} __lowerCAmelCase = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] __lowerCAmelCase = evaluate(dataset=_A , predictions=_A ) return score
92
import argparse import os import re import packaging.version UpperCamelCase__ = """examples/""" UpperCamelCase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase__ = """README.md""" def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) def _a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" 
) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _a ( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
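# Worked example of the version-bump logic above (added; not from the
# original file): starting from __version__ == "4.30.0.dev0",
#   - a normal release proposes 4.30.0 (the base_version of the dev release),
#   - a patch release from 4.30.0 proposes 4.30.1 (micro + 1),
#   - post-release work proposes 4.31.0.dev0 (minor + 1, new dev cycle).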
92
1
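The release script above leans on one idea: anchored, multiline regex substitution over whole files. A minimal standalone sketch of that replacement step, with an illustrative version string (the helper name and sample text are ours, not taken from the records):

import re

# re.MULTILINE makes ^/$ match at line boundaries, so only a real
# `__version__ = "..."` assignment line is rewritten, as in the script above.
INIT_PATTERN = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

def bump_version_string(source: str, new_version: str) -> str:
    """Return `source` with its __version__ assignment replaced."""
    return INIT_PATTERN.sub(f'__version__ = "{new_version}"', source)

sample = 'name = "pkg"\n__version__ = "4.30.0.dev0"\n'
print(bump_version_string(sample, "4.30.0"))  # -> __version__ = "4.30.0"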
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __snake_case = pd.read_csv('''sample_data.csv''', header=None) __snake_case = df.shape[:1][0] # If you're using some other dataset input the target column __snake_case = df.iloc[:, 1:2] __snake_case = actual_data.values.reshape(len_data, 1) __snake_case = MinMaxScaler().fit_transform(actual_data) __snake_case = 10 __snake_case = 5 __snake_case = 20 __snake_case = len_data - periods * look_back __snake_case = actual_data[:division] __snake_case = actual_data[division - look_back :] __snake_case ,__snake_case = [], [] __snake_case ,__snake_case = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __snake_case = np.array(train_x) __snake_case = np.array(test_x) __snake_case = np.array([list(i.ravel()) for i in train_y]) __snake_case = np.array([list(i.ravel()) for i in test_y]) __snake_case = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='''mean_squared_error''', optimizer='''adam''') __snake_case = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) __snake_case = model.predict(x_test)
355
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __lowerCamelCase ( a__ ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Optional[Any]: _a = tempfile.mkdtemp() _a = 8 # DPR tok _a = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _a = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) _a = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) _a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _UpperCAmelCase ( self ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _UpperCAmelCase ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def _UpperCAmelCase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> str: _a = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) 
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _UpperCAmelCase ( self ) -> Optional[Any]: _a = self.get_dummy_dataset() _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: _a = dataset _a = RagRetriever( __UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int: _a = self.get_dummy_dataset() _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: _a = os.path.join(self.tmpdirname , '''dataset''' ) _a = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset _a = RagRetriever( __UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: _a = RagRetriever( __UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , ) return retriever def _UpperCAmelCase ( self ) -> int: _a = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) _a = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) _a = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) _a = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) ) _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) _a = RagRetriever( __UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _UpperCAmelCase ( self ) -> int: _a = 1 _a = self.get_dummy_canonical_hf_index_retriever() _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' 
) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCAmelCase ( self ) -> List[Any]: _a = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: _a = self.get_dummy_dataset() retriever.save_pretrained(__UpperCAmelCase ) _a = RagRetriever.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(__UpperCAmelCase , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCAmelCase ( self ) -> Dict: _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCAmelCase ( self ) -> int: _a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__UpperCAmelCase ) _a = RagRetriever.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(__UpperCAmelCase , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCAmelCase ( self ) -> Any: _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCAmelCase ( self ) -> Tuple: _a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__UpperCAmelCase ) _a = RagRetriever.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(__UpperCAmelCase , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCAmelCase ( self ) -> List[str]: _a 
= 1 _a = self.get_dummy_legacy_index_retriever() _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__UpperCAmelCase ) _a = RagRetriever.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(__UpperCAmelCase , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _UpperCAmelCase ( self ) -> Any: import torch _a = 1 _a = self.get_dummy_canonical_hf_index_retriever() _a = [[5, 7], [10, 11]] _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase ) _a , _a , _a = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , np.ndarray ) _a = retriever( __UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , ) _a , _a , _a , _a = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _UpperCAmelCase ( self ) -> List[Any]: _a = self.get_dpr_ctx_encoder_tokenizer() _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase ) retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase ) _a = [[5, 7], [10, 11]] _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase ) self.assertEqual( len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc token related keys in dictionary.
153
0
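The LSTM script in the record above spends most of its setup building sliding windows: `look_back` past points as input, `forward_days` future points as target. That windowing step in isolation, as a minimal sketch (the function name and sizes are illustrative, not from the script):

import numpy as np

def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    """Split a 1-D series into (input, target) windows, as the two loops above do."""
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])                              # model input
        ys.append(series[i + look_back : i + look_back + forward_days])   # prediction horizon
    return np.array(xs), np.array(ys)

x, y = make_windows(np.arange(20, dtype=float), look_back=10, forward_days=5)
print(x.shape, y.shape)  # (6, 10) (6, 5)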
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a =logging.get_logger(__name__) a ={ """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = '''conditional_detr''' _UpperCAmelCase : int = ['''past_key_values'''] _UpperCAmelCase : Optional[int] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : List[str]=3 ,SCREAMING_SNAKE_CASE__ : int=3_0_0 ,SCREAMING_SNAKE_CASE__ : str=6 ,SCREAMING_SNAKE_CASE__ : Dict=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 ,SCREAMING_SNAKE_CASE__ : int=6 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : List[Any]=8 ,SCREAMING_SNAKE_CASE__ : int=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="relu" ,SCREAMING_SNAKE_CASE__ : List[Any]=2_5_6 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Dict="sine" ,SCREAMING_SNAKE_CASE__ : int="resnet50" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[str]=1 ,SCREAMING_SNAKE_CASE__ : int=1 ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Dict=0.25 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.') __lowerCamelCase : str = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : int = backbone_config.get('model_type') __lowerCamelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = use_timm_backbone __lowerCamelCase : Dict = backbone_config __lowerCamelCase : int = num_channels __lowerCamelCase : Union[str, Any] = num_queries __lowerCamelCase : List[Any] = d_model __lowerCamelCase : str = encoder_ffn_dim __lowerCamelCase : Union[str, Any] = encoder_layers __lowerCamelCase : Union[str, Any] = encoder_attention_heads __lowerCamelCase : Union[str, Any] = decoder_ffn_dim __lowerCamelCase : Optional[Any] = decoder_layers __lowerCamelCase : int = decoder_attention_heads __lowerCamelCase : Optional[Any] = dropout __lowerCamelCase : Optional[Any] = attention_dropout __lowerCamelCase : Any = activation_dropout __lowerCamelCase : int = activation_function __lowerCamelCase : Dict = init_std __lowerCamelCase : int = init_xavier_std __lowerCamelCase : Any = encoder_layerdrop __lowerCamelCase : str = decoder_layerdrop __lowerCamelCase : Dict = encoder_layers __lowerCamelCase : List[str] = auxiliary_loss __lowerCamelCase : Optional[int] = position_embedding_type __lowerCamelCase : List[str] = backbone __lowerCamelCase : Dict = use_pretrained_backbone __lowerCamelCase : Union[str, Any] = dilation # Hungarian matcher __lowerCamelCase : Dict = class_cost __lowerCamelCase : Dict = bbox_cost __lowerCamelCase : Any = giou_cost # Loss coefficients __lowerCamelCase : List[str] = mask_loss_coefficient __lowerCamelCase : Optional[int] = dice_loss_coefficient __lowerCamelCase : Any = cls_loss_coefficient __lowerCamelCase : str = bbox_loss_coefficient __lowerCamelCase : str = giou_loss_coefficient __lowerCamelCase : Optional[Any] = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder_attention_heads @property def lowerCAmelCase ( self : int): return self.d_model def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__) if self.backbone_config is not None: __lowerCamelCase : str = self.backbone_config.to_dict() __lowerCamelCase : Optional[int] = self.__class__.model_type return output class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : int = version.parse('''1.11''' ) @property def lowerCAmelCase ( self : Optional[int]): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def lowerCAmelCase ( self : Optional[Any]): return 1E-5 @property def lowerCAmelCase ( self : str): return 1_2
73
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a__ = logging.get_logger(__name__) a__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case_ : Optional[Any] = """swin""" snake_case_ : Optional[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : str , lowerCAmelCase : Optional[int]=224 , lowerCAmelCase : int=4 , lowerCAmelCase : Any=3 , lowerCAmelCase : int=96 , lowerCAmelCase : Optional[Any]=[2, 2, 6, 2] , lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , lowerCAmelCase : Tuple=7 , lowerCAmelCase : List[Any]=4.0 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Tuple="gelu" , lowerCAmelCase : Any=False , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=None , **lowerCAmelCase : Tuple , ) -> Union[str, Any]: """simple docstring""" super().__init__(**lowerCAmelCase) _snake_case : int = image_size _snake_case : Any = patch_size _snake_case : Union[str, Any] = num_channels _snake_case : int = embed_dim _snake_case : Dict = depths _snake_case : Dict = len(lowerCAmelCase) _snake_case : Optional[Any] = num_heads _snake_case : Tuple = window_size _snake_case : int = mlp_ratio _snake_case : Any = qkv_bias _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[Any] = drop_path_rate _snake_case : List[Any] = hidden_act _snake_case : str = use_absolute_embeddings _snake_case : Tuple = layer_norm_eps _snake_case : Any = initializer_range _snake_case : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _snake_case : Dict = int(embed_dim * 2 ** (len(lowerCAmelCase) - 1)) _snake_case : Optional[Any] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase) + 1)] _snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices( out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names) class snake_case ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case_ : int = version.parse("""1.11""" ) @property def UpperCamelCase_ ( self : Dict) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def UpperCamelCase_ ( self : Dict) -> float: """simple docstring""" return 1E-4
317
0
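Both configuration classes in the record above declare an `attribute_map` so that generic names (`hidden_size`, `num_attention_heads`) resolve to model-specific fields (`d_model`, `encoder_attention_heads`). A stripped-down sketch of how that aliasing can work, independent of the transformers base class (the class name and defaults below are our own):

class AliasedConfig:
    # generic attribute name -> model-specific storage name
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model: int = 256, encoder_attention_heads: int = 8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, so real fields win.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size)  # 256, read through the alias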
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name __lowercase = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class _A ( _a ): """simple docstring""" UpperCAmelCase : Union[PIL.Image.Image, np.ndarray] class _A ( _a ): """simple docstring""" def __init__( self : int , __UpperCAmelCase : PriorTransformer , __UpperCAmelCase : CLIPVisionModel , __UpperCAmelCase : CLIPImageProcessor , __UpperCAmelCase : HeunDiscreteScheduler , __UpperCAmelCase : ShapERenderer , ): super().__init__() self.register_modules( prior=__UpperCAmelCase , image_encoder=__UpperCAmelCase , image_processor=__UpperCAmelCase , scheduler=__UpperCAmelCase , renderer=__UpperCAmelCase , ) def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str]): if latents is None: a : Union[str, Any] = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''') a : Dict = latents.to(__UpperCAmelCase) a : List[str] = latents * scheduler.init_noise_sigma return latents def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]=0): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") a : Any = torch.device(f'''cuda:{gpu_id}''') a : Optional[Any] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase) @property def __snake_case ( self : int): if self.device != torch.device("meta") or not hasattr(self.image_encoder , "_hf_hook"): return self.device for module in self.image_encoder.modules(): if ( hasattr(__UpperCAmelCase , "_hf_hook") and hasattr(module._hf_hook , "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def __snake_case ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , ): if isinstance(__UpperCAmelCase , __UpperCAmelCase) and 
isinstance(image[0] , torch.Tensor): a : int = torch.cat(__UpperCAmelCase , axis=0) if image[0].ndim == 4 else torch.stack(__UpperCAmelCase , axis=0) if not isinstance(__UpperCAmelCase , torch.Tensor): a : Optional[int] = self.image_processor(__UpperCAmelCase , return_tensors="pt").pixel_values[0].unsqueeze(0) a : Any = image.to(dtype=self.image_encoder.dtype , device=__UpperCAmelCase) a : Optional[int] = self.image_encoder(__UpperCAmelCase)["last_hidden_state"] a : List[str] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 a : Tuple = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0) if do_classifier_free_guidance: a : Dict = torch.zeros_like(__UpperCAmelCase) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : List[Any] = torch.cat([negative_image_embeds, image_embeds]) return image_embeds @torch.no_grad() @replace_example_docstring(__UpperCAmelCase) def __call__( self : str , __UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 25 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ): if isinstance(__UpperCAmelCase , PIL.Image.Image): a : Optional[Any] = 1 elif isinstance(__UpperCAmelCase , torch.Tensor): a : int = image.shape[0] elif isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)): a : List[str] = len(__UpperCAmelCase) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__UpperCAmelCase)}''') a : Optional[Any] = self._execution_device a : int = batch_size * num_images_per_prompt a : Dict = guidance_scale > 1.0 a : Optional[Any] = self._encode_image(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # prior self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase) a : List[Any] = self.scheduler.timesteps a : Any = self.prior.config.num_embeddings a : Optional[int] = self.prior.config.embedding_dim a : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim a : Tuple = latents.reshape(latents.shape[0] , __UpperCAmelCase , __UpperCAmelCase) for i, t in enumerate(self.progress_bar(__UpperCAmelCase)): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase) a : Dict = self.prior( __UpperCAmelCase , timestep=__UpperCAmelCase , proj_embedding=__UpperCAmelCase , ).predicted_image_embedding # remove the variance a , a : Union[str, Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: a , a : Optional[Any] = noise_pred.chunk(2) a : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) a : Any = self.scheduler.step( 
__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=__UpperCAmelCase) a : Union[str, Any] = [] for i, latent in enumerate(__UpperCAmelCase): print() a : Union[str, Any] = self.renderer.decode( latent[None, :] , __UpperCAmelCase , size=__UpperCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(__UpperCAmelCase) a : Union[str, Any] = torch.stack(__UpperCAmelCase) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''') a : int = images.cpu().numpy() if output_type == "pil": a : Union[str, Any] = [self.numpy_to_pil(__UpperCAmelCase) for image in images] # Offload last model to CPU if hasattr(self , "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=__UpperCAmelCase)
226
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' def get_matched_characters(A_ , A_ ) -> str: a : Optional[int] = [] a : List[Any] = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): a : int = int(max(0 , i - limit ) ) a : Optional[int] = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(A_ ) a : int = F'''{_stra[0:_stra.index(A_ )]} {_stra[_stra.index(A_ ) + 1:]}''' return "".join(A_ ) # matching characters a : Tuple = get_matched_characters(A_ , A_ ) a : str = get_matched_characters(A_ , A_ ) a : List[str] = len(A_ ) # transposition a : Union[str, Any] = ( len([(ca, ca) for ca, ca in zip(A_ , A_ ) if ca != ca] ) // 2 ) if not match_count: a : Tuple = 0.0 else: a : List[str] = ( 1 / 3 * ( match_count / len(A_ ) + match_count / len(A_ ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters a : Union[str, Any] = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("""hello""", """world"""))
226
1
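The Shap-E denoising loop in the record above applies classifier-free guidance: the batch is doubled (unconditional embeddings first, conditional second), the prediction is split back in two, and the halves are blended by the guidance scale. That blend on its own, sketched with NumPy (shapes and scale are illustrative):

import numpy as np

def apply_cfg(noise_pred: np.ndarray, guidance_scale: float) -> np.ndarray:
    """Recombine the (uncond, cond) halves of a doubled batch."""
    uncond, cond = np.split(noise_pred, 2, axis=0)
    return uncond + guidance_scale * (cond - uncond)

batched = np.concatenate([np.zeros((1, 4)), np.ones((1, 4))])  # uncond, then cond
print(apply_cfg(batched, guidance_scale=3.0))  # [[3. 3. 3. 3.]]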
"""simple docstring""" from __future__ import annotations import bisect def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = -1 ) -> int: if hi < 0: UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase ) while lo < hi: UpperCAmelCase__ : Union[str, Any] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: UpperCAmelCase__ : int = mid + 1 else: UpperCAmelCase__ : Union[str, Any] = mid return lo def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = -1 ) -> int: if hi < 0: UpperCAmelCase__ : Dict = len(lowerCAmelCase ) while lo < hi: UpperCAmelCase__ : Any = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: UpperCAmelCase__ : Tuple = mid + 1 else: UpperCAmelCase__ : Union[str, Any] = mid return lo def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = -1 ) -> None: sorted_collection.insert(bisect_left(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ) def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = -1 ) -> None: sorted_collection.insert(bisect_right(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ) def a__ ( lowerCAmelCase , lowerCAmelCase ) -> int | None: UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase ) - 1 while left <= right: UpperCAmelCase__ : List[Any] = left + (right - left) // 2 UpperCAmelCase__ : List[Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: UpperCAmelCase__ : Tuple = midpoint - 1 else: UpperCAmelCase__ : List[Any] = midpoint + 1 return None def a__ ( lowerCAmelCase , lowerCAmelCase ) -> int | None: UpperCAmelCase__ : Any = bisect.bisect_left(lowerCAmelCase , lowerCAmelCase ) if index != len(lowerCAmelCase ) and sorted_collection[index] == item: return index return None def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int | None: if right < left: return None UpperCAmelCase__ : Union[str, Any] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase , lowerCAmelCase , midpoint + 1 , lowerCAmelCase ) if __name__ == "__main__": _A = input("""Enter numbers separated by comma:\n""").strip() _A = sorted(int(item) for item in user_input.split(""",""")) _A = int(input("""Enter a single number to be found in the list:\n""")) _A = binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
171
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" ) # Using `do_sample=False` to force deterministic output UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ] , ) UpperCAmelCase__ : List[Any] = text_generator(["""This is a test""", """This is a second test"""] ) self.assertEqual( _lowerCamelCase , [ [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ], [ { """generated_text""": ( """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy""" """ oscope. oscope. FiliFili@@""" ) } ], ] , ) UpperCAmelCase__ : int = text_generator("""This is a test""" , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ {"""generated_token_ids""": ANY(_lowerCamelCase )}, {"""generated_token_ids""": ANY(_lowerCamelCase )}, ] , ) UpperCAmelCase__ : Optional[int] = text_generator.model.config.eos_token_id UpperCAmelCase__ : Any = """<pad>""" UpperCAmelCase__ : Any = text_generator( ["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , ) self.assertEqual( _lowerCamelCase , [ [ {"""generated_token_ids""": ANY(_lowerCamelCase )}, {"""generated_token_ids""": ANY(_lowerCamelCase )}, ], [ {"""generated_token_ids""": ANY(_lowerCamelCase )}, {"""generated_token_ids""": ANY(_lowerCamelCase )}, ], ] , ) @require_tf def _a (self ): """simple docstring""" UpperCAmelCase__ : str = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" ) # Using `do_sample=False` to force deterministic output UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ] , ) UpperCAmelCase__ : Dict = text_generator(["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ], [ { """generated_text""": ( """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes""" """ Cannes 閲閲Cannes Cannes Cannes 攵 please,""" ) } ], ] , ) def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = 
TextGenerationPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) return text_generator, ["This is a test", "Another test"] def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = """Hello I believe in""" UpperCAmelCase__ : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase__ : Any = text_generator(_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , ) UpperCAmelCase__ : int = text_generator(_lowerCamelCase , stop_sequence=""" fe""" ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe"""}] ) def _a (self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = text_generator.model UpperCAmelCase__ : Union[str, Any] = text_generator.tokenizer UpperCAmelCase__ : Any = text_generator("""This is a test""" ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) UpperCAmelCase__ : List[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) UpperCAmelCase__ : int = pipeline(task="""text-generation""" , model=_lowerCamelCase , tokenizer=_lowerCamelCase , return_full_text=_lowerCamelCase ) UpperCAmelCase__ : Dict = text_generator("""This is a test""" ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) UpperCAmelCase__ : Optional[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) UpperCAmelCase__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ [{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}], [{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: UpperCAmelCase__ : Union[str, Any] = text_generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ [{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}], [{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}], ] , ) with self.assertRaises(_lowerCamelCase ): UpperCAmelCase__ : List[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_text=_lowerCamelCase ) with self.assertRaises(_lowerCamelCase ): UpperCAmelCase__ : Optional[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_tensors=_lowerCamelCase ) with self.assertRaises(_lowerCamelCase ): UpperCAmelCase__ : Any = text_generator("""test""" , return_text=_lowerCamelCase , return_tensors=_lowerCamelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): UpperCAmelCase__ : Dict = text_generator("""""" ) self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): UpperCAmelCase__ : str = text_generator("""""" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. UpperCAmelCase__ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("""This is a test""" * 500 , max_new_tokens=20 ) UpperCAmelCase__ : str = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(_lowerCamelCase ): text_generator( """This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def _a (self ): """simple docstring""" import torch # Classic `model_kwargs` UpperCAmelCase__ : str = pipeline( model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) UpperCAmelCase__ : List[str] = pipe("""This is a test""" ) self.assertEqual( _lowerCamelCase , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
UpperCAmelCase__ : int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) UpperCAmelCase__ : Any = pipe("""This is a test""" ) self.assertEqual( _lowerCamelCase , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 UpperCAmelCase__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) UpperCAmelCase__ : Optional[int] = pipe("""This is a test""" ) self.assertEqual( _lowerCamelCase , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) @require_torch @require_torch_gpu def _a (self ): """simple docstring""" import torch UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa ) pipe("""This is a test""" ) @require_torch @require_accelerate @require_torch_gpu def _a (self ): """simple docstring""" import torch UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa ) pipe("""This is a test""" , do_sample=_lowerCamelCase , top_p=0.5 ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = """Hello world""" UpperCAmelCase__ : str = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) if text_generator.model.framework == "tf": UpperCAmelCase__ : Any = logging.get_logger("""transformers.generation.tf_utils""" ) else: UpperCAmelCase__ : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" ) UpperCAmelCase__ : Optional[int] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(_lowerCamelCase ) as cl: UpperCAmelCase__ : List[str] = text_generator(_lowerCamelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(_lowerCamelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(_lowerCamelCase ) as cl: UpperCAmelCase__ : Any = text_generator(_lowerCamelCase , max_new_tokens=1 ) self.assertNotIn(_lowerCamelCase , cl.out ) with CaptureLogger(_lowerCamelCase ) as cl: UpperCAmelCase__ : Optional[Any] = text_generator(_lowerCamelCase , max_length=10 ) self.assertNotIn(_lowerCamelCase , cl.out )
171
1
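The search module in the record above implements `bisect_left`/`bisect_right` by hand and also wraps the standard library. For comparison, the stdlib route alone (the data values and helper name are our own):

import bisect

data = [1, 3, 3, 5, 8]
print(bisect.bisect_left(data, 3))   # 1: first position where 3 fits
print(bisect.bisect_right(data, 3))  # 3: position after the last 3

def contains(sorted_items, item) -> bool:
    """Membership test via bisect, mirroring the module's stdlib-backed search."""
    i = bisect.bisect_left(sorted_items, item)
    return i < len(sorted_items) and sorted_items[i] == item

print(contains(data, 5), contains(data, 4))  # True False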
'''simple docstring''' import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a_ ( _UpperCAmelCase , unittest.TestCase ): __A = TransfoXLTokenizer __A = False __A = False def lowercase__ ( self : Union[str, Any] ): """simple docstring""" super().setUp() lowercase_ :int = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] lowercase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def lowercase__ ( self : int , **lowercase : Optional[int] ): """simple docstring""" lowercase_ :Tuple = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowercase__ ( self : Optional[Any] , lowercase : Tuple ): """simple docstring""" lowercase_ :List[Any] = '''<unk> UNwanted , running''' lowercase_ :Optional[Any] = '''<unk> unwanted, running''' return input_text, output_text def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :str = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_UpperCAmelCase ) lowercase_ :Dict = tokenizer.tokenize("<unk> UNwanted , running" ) self.assertListEqual(_UpperCAmelCase , ["<unk>", "unwanted", ",", "running"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [0, 4, 8, 7] ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :Optional[int] = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :str = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :Optional[Any] = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) lowercase_ :Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' lowercase_ :Optional[Any] = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(tokenizer.convert_tokens_to_string(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : List[str] ): """simple docstring""" lowercase_ :Optional[int] = self.get_tokenizer() lowercase_ :int = len(_UpperCAmelCase ) tokenizer.add_tokens(["new1", "new2"] ) tokenizer.move_added_token("new1" , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(_UpperCAmelCase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1" ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , "new1" )
352
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class a_ ( _lowerCAmelCase ):
    @staticmethod
    @abstractmethod
    def lowercase__ ( lowercase : ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def lowercase__ ( self : str ):
        """simple docstring"""
        raise NotImplementedError()
147
0
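The abstract class in the record above is the root of a subcommand pattern: each concrete command registers its own argparse subparser and implements a run method. A self-contained sketch of that pattern (all names below are illustrative stand-ins, not the transformers CLI API):

from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser) -> None:
        """Attach this command's subparser to the top-level parser."""

    @abstractmethod
    def run(self) -> None:
        """Execute the command."""

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser) -> None:
        sub = parser.add_subparsers(dest="command").add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text: str) -> None:
        self.text = text

    def run(self) -> None:
        print(self.text)

parser = ArgumentParser("demo-cli")
EchoCommand.register_subcommand(parser)
args = parser.parse_args(["echo", "hi"])
args.factory(args).run()  # prints: hi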
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
    # Check if the input is valid
    if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )

    # Extract the coefficients
    UpperCamelCase :Any = equationa
    UpperCamelCase :Tuple = equationa

    # Calculate the determinants of the matrices
    UpperCamelCase :int = aa * ba - aa * ba
    UpperCamelCase :str = ca * ba - ca * ba
    UpperCamelCase :str = aa * ca - aa * ca

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            UpperCamelCase :Dict = determinant_x / determinant
            UpperCamelCase :str = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
259
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Tuple ) -> List[Any]: _a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Optional[Any] = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) _a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : Optional[Any] = generator.manual_seed(0 ) _a : str = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : int = """cyberpunk 2077""" _a : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple = torch.manual_seed(0 ) _a : Any = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _a : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : int = """A painting of a squirrel eating a burger """ _a : Tuple = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images _a : int = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images _a : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 
512, 512, 3) _a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
294
0
'''simple docstring''' # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase = concatenate_datasets __lowerCamelCase = DownloadConfig __lowerCamelCase = DownloadManager __lowerCamelCase = DownloadMode __lowerCamelCase = DownloadConfig __lowerCamelCase = DownloadMode __lowerCamelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
101
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class A__ : def __init__( self , UpperCamelCase__ ) -> Dict: '''simple docstring''' A_ = str(id_ ) A_ = None A_ = None A_ = [] A_ = {} # {vertex:distance} def __lt__( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' return self.key < other.key def __repr__( self ) -> Dict: '''simple docstring''' return self.id def snake_case_ ( self , UpperCamelCase__ ) -> Dict: '''simple docstring''' self.neighbors.append(UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' A_ = weight def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]: # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1], UpperCAmelCase__ ) graph[b - 1].add_edge(graph[a - 1], UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> list: A_ = [] for u in graph: A_ = math.inf A_ = None A_ = 0 A_ = graph[:] while q: A_ = min(UpperCAmelCase__ ) q.remove(UpperCAmelCase__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): A_ = u A_ = u.edges[v.id] for i in range(1, len(UpperCAmelCase__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Iterator[tuple]: for u in graph: A_ = math.inf A_ = None A_ = 0 A_ = list(UpperCAmelCase__ ) hq.heapify(UpperCAmelCase__ ) while h: A_ = hq.heappop(UpperCAmelCase__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): A_ = u A_ = u.edges[v.id] hq.heapify(UpperCAmelCase__ ) for i in range(1, len(UpperCAmelCase__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def UpperCAmelCase__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
101
1
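The style-context cell of the row above carries a heapq-based Prim's algorithm whose identifiers were anonymized away; as a readable, self-contained sketch of the same min-heap idea (all names below are mine, not from the sample):

```python
import heapq

def prim_mst(n: int, edges: list[tuple[int, int, int]]) -> int:
    """Total weight of a minimum spanning tree over vertices 0..n-1.

    `edges` holds undirected (u, v, weight) triples; the graph is assumed connected.
    """
    adj: list[list[tuple[int, int]]] = [[] for _ in range(n)]  # vertex -> [(weight, neighbor)]
    for u, v, w in edges:
        adj[u].append((w, v))
        adj[v].append((w, u))
    visited = [False] * n
    heap = [(0, 0)]  # (edge weight, vertex), seeded with vertex 0 at cost 0
    total = 0
    while heap:
        w, u = heapq.heappop(heap)
        if visited[u]:
            continue  # a cheaper edge already reached u
        visited[u] = True
        total += w
        for candidate in adj[u]:
            if not visited[candidate[1]]:
                heapq.heappush(heap, candidate)
    return total

assert prim_mst(3, [(0, 1, 2), (1, 2, 3), (0, 2, 10)]) == 5
```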
import math import unittest def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowercase ( unittest.TestCase ): def A__ ( self): self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(1_1)) self.assertTrue(is_prime(1_3)) self.assertTrue(is_prime(1_7)) self.assertTrue(is_prime(1_9)) self.assertTrue(is_prime(2_3)) self.assertTrue(is_prime(2_9)) def A__ ( self): with self.assertRaises(A__): is_prime(-1_9) self.assertFalse( is_prime(0) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,) self.assertFalse( is_prime(1) ,'''One only has 1 positive factor, primes must have exactly two.''' ,) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
101
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _lowerCAmelCase ( lowercase_ = 8 ): UpperCAmelCase = ascii_letters + digits + punctuation return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowercase_ ) UpperCAmelCase = i // 3 UpperCAmelCase = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) UpperCAmelCase = ( chars_incl + random(lowercase_ , quotient + remainder ) + random(lowercase_ , lowercase_ ) + random(lowercase_ , lowercase_ ) ) UpperCAmelCase = list(lowercase_ ) shuffle(lowercase_ ) return "".join(lowercase_ ) # random is a generalised function for letters, characters and numbers def _lowerCAmelCase ( lowercase_ , lowercase_ ): return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ): if len(lowercase_ ) < min_length: # Your Password must be at least 8 characters long return False UpperCAmelCase = any(char in ascii_uppercase for char in password ) UpperCAmelCase = any(char in ascii_lowercase for char in password ) UpperCAmelCase = any(char in digits for char in password ) UpperCAmelCase = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _lowerCAmelCase ( ): UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() ) UpperCAmelCase = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(lowercase_ ) ) print( 'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
78
0
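The code cell of the row above checks primality by trial division over candidates of the form 6k +/- 1 (every prime above 3 has that form, since 6k, 6k+2, 6k+3 and 6k+4 are all composite). A minimal de-obfuscated sketch of the same check, with names of my own choosing:

```python
import math

def is_prime(number: int) -> bool:
    """Trial division accelerated by the 6k +/- 1 structure of primes > 3."""
    if number < 2:
        return False          # negatives, 0 and 1 are not prime
    if number < 4:
        return True           # 2 and 3 are prime
    if number % 2 == 0 or number % 3 == 0:
        return False
    # Any remaining divisor <= sqrt(number) is of the form 6k - 1 or 6k + 1.
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert is_prime(29) and not is_prime(27)
```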
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _A ( _lowerCAmelCase ): """simple docstring""" return np.dot(_lowerCAmelCase , _lowerCAmelCase ) class _UpperCamelCase : '''simple docstring''' def __init__( self : Dict , *, _lowerCAmelCase : float = np.inf , _lowerCAmelCase : str = "linear" , _lowerCAmelCase : float = 0.0 , ): '''simple docstring''' __lowercase =regularization __lowercase =gamma if kernel == "linear": __lowercase =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') __lowercase =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __lowercase =f"""Unknown kernel: {kernel}""" raise ValueError(_lowerCAmelCase) def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : ndarray , _lowerCAmelCase : ndarray): '''simple docstring''' return np.dot(_lowerCAmelCase , _lowerCAmelCase) def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : ndarray , _lowerCAmelCase : ndarray): '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : list[ndarray] , _lowerCAmelCase : ndarray): '''simple docstring''' __lowercase =observations __lowercase =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__lowercase) , ) =np.shape(_lowerCAmelCase) def to_minimize(_lowerCAmelCase : ndarray) -> float: __lowercase =0 ((__lowercase) , ) =np.shape(_lowerCAmelCase) for i in range(_lowerCAmelCase): for j in range(_lowerCAmelCase): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(_lowerCAmelCase) __lowercase =LinearConstraint(_lowerCAmelCase , 0 , 0) __lowercase =Bounds(0 , self.regularization) __lowercase =minimize( _lowerCAmelCase , np.ones(_lowerCAmelCase) , bounds=_lowerCAmelCase , constraints=[ly_contraint]).x __lowercase =l_star # calculating mean offset of separation plane to points __lowercase =0 for i in range(_lowerCAmelCase): for j in range(_lowerCAmelCase): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) __lowercase =s / n def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : ndarray): '''simple docstring''' __lowercase =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowerCAmelCase) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
48
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase = { """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = ["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
48
1
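The SVM cell in the row above validates `gamma` before enabling its RBF kernel. For reference, that kernel is just a Gaussian of the squared distance between two vectors; a small standalone sketch (function name is mine):

```python
import numpy as np

def rbf_kernel(x: np.ndarray, y: np.ndarray, gamma: float) -> float:
    """Gaussian (RBF) kernel: exp(-gamma * ||x - y||^2); requires gamma > 0."""
    if gamma <= 0:
        raise ValueError("gamma must be > 0")
    diff = x - y
    return float(np.exp(-gamma * np.dot(diff, diff)))

# Identical vectors score 1.0; the score decays toward 0 with distance.
assert rbf_kernel(np.array([1.0, 0.0]), np.array([1.0, 0.0]), gamma=0.5) == 1.0
```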
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class a_ ( yaml.SafeLoader ): '''simple docstring''' def snake_case_( self , A ) -> Optional[int]: _SCREAMING_SNAKE_CASE = [self.constructed_objects[key_node] for key_node, _ in node.value] _SCREAMING_SNAKE_CASE = [tuple(A ) if isinstance(A , A ) else key for key in keys] _SCREAMING_SNAKE_CASE = Counter(A ) _SCREAMING_SNAKE_CASE = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' ) def snake_case_( self , A , A=False ) -> List[str]: _SCREAMING_SNAKE_CASE = super().construct_mapping(A , deep=A ) self._check_no_duplicates_on_constructed_node(A ) return mapping def lowerCamelCase ( __lowerCamelCase : str ) ->Tuple[Optional[str], str]: _SCREAMING_SNAKE_CASE = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: _SCREAMING_SNAKE_CASE = full_content[1:].index("""---""" ) + 1 _SCREAMING_SNAKE_CASE = """\n""".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(__lowerCamelCase ) class a_ ( snake_case_ ): '''simple docstring''' # class attributes UpperCamelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def snake_case_( cls , A ) -> "DatasetMetadata": with open(A , encoding="""utf-8""" ) as readme_file: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A ) else: return cls() def snake_case_( self , A ) -> Dict: if path.exists(): with open(A , encoding="""utf-8""" ) as readme_file: _SCREAMING_SNAKE_CASE = readme_file.read() else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self._to_readme(A ) with open(A , """w""" , encoding="""utf-8""" ) as readme_file: readme_file.write(A ) def snake_case_( self , A = None ) -> str: if readme_content is not None: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _split_yaml_from_readme(A ) _SCREAMING_SNAKE_CASE = """---\n""" + self.to_yaml_string() + """---\n""" + content else: _SCREAMING_SNAKE_CASE = """---\n""" + self.to_yaml_string() + """---\n""" return full_content @classmethod def snake_case_( cls , A ) -> "DatasetMetadata": _SCREAMING_SNAKE_CASE = yaml.load(A , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields _SCREAMING_SNAKE_CASE = { (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A ) def snake_case_( self ) -> str: return yaml.safe_dump( { (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A , allow_unicode=A , encoding="""utf-8""" , ).decode("""utf-8""" ) lowercase_ = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], 
"""tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser lowercase_ = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") lowercase_ = ap.parse_args() lowercase_ = Path(args.readme_filepath) lowercase_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
58
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='vit_msn' def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]: """simple docstring""" super().__init__(**a ) SCREAMING_SNAKE_CASE : Dict = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : int = layer_norm_eps SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : Tuple = patch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : List[str] = qkv_bias
76
0
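The `DatasetMetadata` cell above splits a README into its leading YAML block and the remaining body. Since the anonymized `_split_yaml_from_readme` is hard to read, here is a behavior-equivalent sketch under the same `---` fence convention (variable names are mine):

```python
from typing import Optional, Tuple

def split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Separate a leading ``---`` ... ``---`` YAML block from the rest of a README."""
    lines = readme_content.splitlines()
    if lines and lines[0] == "---" and "---" in lines[1:]:
        sep_idx = lines[1:].index("---") + 1          # index of the closing fence
        return "\n".join(lines[1:sep_idx]), "\n".join(lines[sep_idx + 1 :])
    return None, "\n".join(lines)                     # no front matter found

yaml_block, body = split_yaml_from_readme("---\nlicense: mit\n---\n# Title")
assert yaml_block == "license: mit" and body == "# Title"
```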
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder _lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name _lowerCamelCase = 2_56 class a ( _A ): '''simple docstring''' lowerCAmelCase : List[Any] = ['melgan'] def __init__( self : List[Any] , __snake_case : SpectrogramNotesEncoder , __snake_case : SpectrogramContEncoder , __snake_case : TaFilmDecoder , __snake_case : DDPMScheduler , __snake_case : OnnxRuntimeModel if is_onnx_available() else Any , ): super().__init__() # From MELGAN UpperCAmelCase_ = math.log(1E-5 ) # Matches MelGAN training. UpperCAmelCase_ = 4.0 # Largest value for most examples UpperCAmelCase_ = 1_28 self.register_modules( notes_encoder=__snake_case , continuous_encoder=__snake_case , decoder=__snake_case , scheduler=__snake_case , melgan=__snake_case , ) def lowerCamelCase_ ( self : str , __snake_case : Tuple , __snake_case : List[Any]=(-1.0, 1.0) , __snake_case : Tuple=False ): UpperCAmelCase_ , UpperCAmelCase_ = output_range if clip: UpperCAmelCase_ = torch.clip(__snake_case , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCAmelCase_ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def lowerCamelCase_ ( self : List[Any] , __snake_case : int , __snake_case : str=(-1.0, 1.0) , __snake_case : int=False ): UpperCAmelCase_ , UpperCAmelCase_ = input_range UpperCAmelCase_ = torch.clip(__snake_case , __snake_case , __snake_case ) if clip else outputs # Scale to [0, 1]. UpperCAmelCase_ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def lowerCamelCase_ ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str ): UpperCAmelCase_ = input_tokens > 0 UpperCAmelCase_ , UpperCAmelCase_ = self.notes_encoder( encoder_input_tokens=__snake_case , encoder_inputs_mask=__snake_case ) UpperCAmelCase_ , UpperCAmelCase_ = self.continuous_encoder( encoder_inputs=__snake_case , encoder_inputs_mask=__snake_case ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def lowerCamelCase_ ( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] ): UpperCAmelCase_ = noise_time if not torch.is_tensor(__snake_case ): UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(__snake_case ) and len(timesteps.shape ) == 0: UpperCAmelCase_ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCAmelCase_ = self.decoder( encodings_and_masks=__snake_case , decoder_input_tokens=__snake_case , decoder_noise_time=__snake_case ) return logits @torch.no_grad() def __call__( self : List[Any] , __snake_case : List[List[int]] , __snake_case : Optional[torch.Generator] = None , __snake_case : int = 1_00 , __snake_case : bool = True , __snake_case : str = "numpy" , __snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case : int = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(__snake_case )}.' ) UpperCAmelCase_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCAmelCase_ = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCAmelCase_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__snake_case , device=self.device ) for i, encoder_input_tokens in enumerate(__snake_case ): if i == 0: UpperCAmelCase_ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCAmelCase_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__snake_case , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCAmelCase_ = ones UpperCAmelCase_ = self.scale_features( __snake_case , output_range=[-1.0, 1.0] , clip=__snake_case ) UpperCAmelCase_ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__snake_case , continuous_mask=__snake_case , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCAmelCase_ = randn_tensor( shape=encoder_continuous_inputs.shape , generator=__snake_case , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(__snake_case ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCAmelCase_ = self.decode( encodings_and_masks=__snake_case , input_tokens=__snake_case , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCAmelCase_ = self.scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample UpperCAmelCase_ = self.scale_to_features(__snake_case , input_range=[-1.0, 1.0] ) UpperCAmelCase_ = mel[:1] UpperCAmelCase_ = mel.cpu().float().numpy() UpperCAmelCase_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__snake_case , __snake_case ) logger.info('''Generated segment''' , __snake_case ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": UpperCAmelCase_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCAmelCase_ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=__snake_case )
353
from __future__ import annotations import os from collections.abc import Mapping _lowerCamelCase = tuple[int, int] class a : '''simple docstring''' def __init__( self : str , __snake_case : set[int] , __snake_case : Mapping[EdgeT, int] ): UpperCAmelCase_ = vertices UpperCAmelCase_ = { (min(__snake_case ), max(__snake_case )): weight for edge, weight in edges.items() } def lowerCamelCase_ ( self : Any , __snake_case : EdgeT , __snake_case : int ): self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) UpperCAmelCase_ = weight def lowerCamelCase_ ( self : Union[str, Any] ): UpperCAmelCase_ = Graph({min(self.vertices )} , {} ) UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 while len(subgraph.vertices ) < len(self.vertices ): UpperCAmelCase_ = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: UpperCAmelCase_ = edge UpperCAmelCase_ = weight subgraph.add_edge(__snake_case , __snake_case ) return subgraph def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "p107_network.txt" ) -> int: UpperCAmelCase_ = os.path.abspath(os.path.dirname(__UpperCamelCase ) ) UpperCAmelCase_ = os.path.join(__UpperCamelCase , __UpperCamelCase ) UpperCAmelCase_ = {} UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 with open(__UpperCamelCase ) as f: UpperCAmelCase_ = f.read().strip().split('''\n''' ) UpperCAmelCase_ = [line.split(''',''' ) for line in data] for edgea in range(1 , len(__UpperCamelCase ) ): for edgea in range(__UpperCamelCase ): if adjaceny_matrix[edgea][edgea] != "-": UpperCAmelCase_ = int(adjaceny_matrix[edgea][edgea] ) UpperCAmelCase_ = Graph(set(range(len(__UpperCamelCase ) ) ) , __UpperCamelCase ) UpperCAmelCase_ = graph.prims_algorithm() UpperCAmelCase_ = sum(graph.edges.values() ) UpperCAmelCase_ = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"{solution() = }")
177
0
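The spectrogram pipeline in the row above rescales mel features with `scale_features` / `scale_to_features`, which are plain affine maps between value ranges. A minimal sketch of the forward direction (signature simplified; names mine):

```python
import torch

def scale_features(features: torch.Tensor, min_value: float, max_value: float,
                   output_range: tuple[float, float] = (-1.0, 1.0)) -> torch.Tensor:
    """Affinely map features from [min_value, max_value] onto output_range."""
    min_out, max_out = output_range
    zero_one = (features - min_value) / (max_value - min_value)  # scale to [0, 1]
    return zero_one * (max_out - min_out) + min_out              # then to [min_out, max_out]

x = torch.tensor([0.0, 2.0, 4.0])
assert torch.allclose(scale_features(x, 0.0, 4.0), torch.tensor([-1.0, 0.0, 1.0]))
```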
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig SCREAMING_SNAKE_CASE : Optional[Any] = { """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='ernie_m' lowerCamelCase__ ={"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__(self , a_ = 25_00_02 , a_ = 7_68 , a_ = 12 , a_ = 12 , a_ = 30_72 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 5_14 , a_ = 0.02 , a_ = 1 , a_ = 1E-05 , a_=None , a_=False , a_=0.0 , **a_ , ): '''simple docstring''' super().__init__(pad_token_id=a_ , **a_ ) __snake_case : Union[str, Any] = vocab_size __snake_case : Optional[Any] = hidden_size __snake_case : Optional[Any] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Optional[Any] = initializer_range __snake_case : Any = layer_norm_eps __snake_case : str = classifier_dropout __snake_case : Optional[int] = is_decoder __snake_case : Optional[int] = act_dropout
102
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
74
0
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } lowerCAmelCase_ = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } lowerCAmelCase_ = { '''ctrl''': 2_5_6, } lowerCAmelCase_ = { '''Pregnancy''': 1_6_8_6_2_9, '''Christianity''': 7_6_7_5, '''Explain''': 1_0_6_4_2_3, '''Fitness''': 6_3_4_4_0, '''Saving''': 6_3_1_6_3, '''Ask''': 2_7_1_7_1, '''Ass''': 9_5_9_8_5, '''Joke''': 1_6_3_5_0_9, '''Questions''': 4_5_6_2_2, '''Thoughts''': 4_9_6_0_5, '''Retail''': 5_2_3_4_2, '''Feminism''': 1_6_4_3_3_8, '''Writing''': 1_1_9_9_2, '''Atheism''': 1_9_2_2_6_3, '''Netflix''': 4_8_6_1_6, '''Computing''': 3_9_6_3_9, '''Opinion''': 4_3_2_1_3, '''Alone''': 4_4_9_6_7, '''Funny''': 5_8_9_1_7, '''Gaming''': 4_0_3_5_8, '''Human''': 4_0_8_8, '''India''': 1_3_3_1, '''Joker''': 7_7_1_3_8, '''Diet''': 3_6_2_0_6, '''Legal''': 1_1_8_5_9, '''Norman''': 4_9_3_9, '''Tip''': 7_2_6_8_9, '''Weight''': 5_2_3_4_3, '''Movies''': 4_6_2_7_3, '''Running''': 2_3_4_2_5, '''Science''': 2_0_9_0, '''Horror''': 3_7_7_9_3, '''Confession''': 6_0_5_7_2, '''Finance''': 1_2_2_5_0, '''Politics''': 1_6_3_6_0, '''Scary''': 1_9_1_9_8_5, '''Support''': 1_2_6_5_4, '''Technologies''': 3_2_5_1_6, '''Teenage''': 6_6_1_6_0, '''Event''': 3_2_7_6_9, '''Learned''': 6_7_4_6_0, '''Notion''': 1_8_2_7_7_0, '''Wikipedia''': 3_7_5_8_3, '''Books''': 6_6_6_5, '''Extract''': 7_6_0_5_0, '''Confessions''': 1_0_2_7_0_1, '''Conspiracy''': 7_5_9_3_2, '''Links''': 6_3_6_7_4, '''Narcissus''': 1_5_0_4_2_5, '''Relationship''': 5_4_7_6_6, '''Relationships''': 1_3_4_7_9_6, '''Reviews''': 4_1_6_7_1, '''News''': 4_2_5_6, '''Translation''': 2_6_8_2_0, '''multilingual''': 1_2_8_4_0_6, } def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = set() snake_case_ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case_ : Tuple = char snake_case_ : Optional[int] = set(_UpperCamelCase ) return pairs class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[str] = VOCAB_FILES_NAMES lowerCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : int = CONTROL_CODES def __init__(self , __magic_name__ , __magic_name__ , __magic_name__="<unk>" , **__magic_name__ ) -> Union[str, Any]: '''simple docstring''' super().__init__(unk_token=__magic_name__ , **__magic_name__ ) with open(__magic_name__ , encoding='''utf-8''' ) as vocab_handle: snake_case_ : List[Any] = json.load(__magic_name__ ) snake_case_ : int = {v: k for k, v in self.encoder.items()} with open(__magic_name__ , encoding='''utf-8''' ) as merges_handle: snake_case_ : Tuple = merges_handle.read().split('''\n''' )[1:-1] snake_case_ : Union[str, Any] = [tuple(merge.split() ) for merge in merges] snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) snake_case_ : str = {} @property def lowerCamelCase (self ) -> Any: '''simple docstring''' return len(self.encoder ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase (self , __magic_name__ ) -> 
List[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] snake_case_ : Dict = tuple(__magic_name__ ) snake_case_ : Dict = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) snake_case_ : str = get_pairs(__magic_name__ ) if not pairs: return token while True: snake_case_ : List[str] = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break snake_case_ , snake_case_ : Tuple = bigram snake_case_ : Optional[Any] = [] snake_case_ : Tuple = 0 while i < len(__magic_name__ ): try: snake_case_ : List[str] = word.index(__magic_name__ , __magic_name__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case_ : Union[str, Any] = j if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case_ : int = tuple(__magic_name__ ) snake_case_ : Optional[int] = new_word if len(__magic_name__ ) == 1: break else: snake_case_ : List[Any] = get_pairs(__magic_name__ ) snake_case_ : Any = '''@@ '''.join(__magic_name__ ) snake_case_ : str = word[:-4] snake_case_ : Tuple = word return word def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Any = [] snake_case_ : Any = re.findall(R'''\S+\n?''' , __magic_name__ ) for token in words: split_tokens.extend(list(self.bpe(__magic_name__ ).split(''' ''' ) ) ) return split_tokens def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) ) def lowerCamelCase (self , __magic_name__ ) -> Tuple: '''simple docstring''' return self.decoder.get(__magic_name__ , self.unk_token ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : int = ''' '''.join(__magic_name__ ).replace('''@@ ''' , '''''' ).strip() return out_string def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__magic_name__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Tuple = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ : List[Any] = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + '''\n''' ) snake_case_ : Optional[int] = 0 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) snake_case_ : List[str] = token_index writer.write(''' '''.join(__magic_name__ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # 
tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
279
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = seq_length snake_case_ : Tuple = is_training snake_case_ : List[str] = use_attention_mask snake_case_ : Any = use_token_type_ids snake_case_ : Dict = use_labels snake_case_ : Optional[Any] = vocab_size snake_case_ : Dict = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Optional[int] = max_position_embeddings snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[Any] = type_sequence_label_size snake_case_ : Dict = initializer_range snake_case_ : Dict = num_choices def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : Any = None if self.use_attention_mask: snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : List[Any] = None if self.use_token_type_ids: snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : List[Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs snake_case_ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, 
FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = FlaxAlbertModelTester(self ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' for model_class_name in self.all_model_classes: snake_case_ : Dict = model_class_name.from_pretrained('''albert-base-v2''' ) snake_case_ : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[Any] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) snake_case_ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) snake_case_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0] snake_case_ : Tuple = (1, 11, 768) self.assertEqual(output.shape , __magic_name__ ) snake_case_ : str = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
279
1
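The CTRL tokenizer cell above drives its BPE merge loop off the set of adjacent symbol pairs in a word; the anonymized helper at its top computes exactly this. A compact sketch of that pair extraction (name mine):

```python
def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    """All adjacent symbol pairs in a word: the merge candidates for one BPE step."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
```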
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={"vocab_file": "vocab.json"} __UpperCAmelCase ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } __UpperCAmelCase ={"mgp-str": 2_7} class a__ ( UpperCAmelCase__ ): lowerCamelCase : List[Any] =VOCAB_FILES_NAMES lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , a : str , a : Optional[int]="[GO]" , a : int="[GO]" , a : Union[str, Any]="[s]" , a : Union[str, Any]="[GO]" , **a : Optional[int] ): """simple docstring""" super().__init__( unk_token=a , bos_token=a , eos_token=a , pad_token=a , **a , ) with open(a , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(a ) __lowerCamelCase = {v: k for k, v in self.vocab.items()} @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return len(self.vocab ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : str ): """simple docstring""" __lowerCamelCase = [] for s in text: char_tokens.extend(a ) return char_tokens def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : List[Any] ): """simple docstring""" return self.vocab.get(a , self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE__ ( self : int , a : Any ): """simple docstring""" return self.decoder.get(a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a ): logger.error('''Vocabulary path ({}) should be a directory'''.format(a ) ) return __lowerCamelCase = os.path.join( a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) with open(a , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=a , ensure_ascii=a ) + '''\n''' ) return (vocab_file,)
67
'''simple docstring''' def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: if b == 0: return 1 if (b % 2) == 0: return actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) ) else: return a * actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> float: if b < 0: return 1 / actual_power(UpperCamelCase__ , UpperCamelCase__ ) return actual_power(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": print(power(-2, -3))
67
1
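The style-context cell of the row above does recursive exponentiation, but it evaluates `actual_power(a, int(b / 2))` twice at every level, so the recursion tree has O(b) nodes rather than the O(log b) that binary exponentiation promises. An iterative square-and-multiply sketch that keeps the logarithmic bound (names mine):

```python
def fast_power(base: float, exp: int) -> float:
    """Iterative binary exponentiation: O(log |exp|) multiplications."""
    if exp < 0:
        return 1.0 / fast_power(base, -exp)
    result = 1.0
    while exp:
        if exp & 1:        # low bit set: fold the current square into the result
            result *= base
        base *= base       # square for the next bit
        exp >>= 1
    return result

assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125
```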
'''simple docstring''' import pytest __lowercase : List[str] = '__dummy_dataset1__' __lowercase : List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def lowerCamelCase (): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowerCamelCase (): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ): __a : str = dataset_loading_script_name __a : List[str] = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=_SCREAMING_SNAKE_CASE ) __a : str = script_dir / F"""{script_name}.py""" with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE )
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Union[str, Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
1
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
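# A quick standalone sanity check of the shape arithmetic asserted in
# create_and_check_model above (the helper name below is ours, not part of the
# test file): each of the four stride-2 convolutions in the patch embedding
# shrinks the spatial size via floor((n + 2*padding - kernel) / stride) + 1, and
# the two Subsample stages then divide each spatial dimension by 4.
from math import ceil, floor


def expected_final_seq_length(image_size=64, kernel_size=3, stride=2, padding=1):
    height = width = image_size
    for _ in range(4):
        height = floor(((height + 2 * padding - kernel_size) / stride) + 1)
        width = floor(((width + 2 * padding - kernel_size) / stride) + 1)
    # 64 -> 32 -> 16 -> 8 -> 4, i.e. a 4x4 grid (16 tokens) after the patch
    # embedding; the Subsample stages reduce that to ceil(4/4) * ceil(4/4) = 1 token.
    return ceil(height / 4) * ceil(width / 4)


# With the tester defaults this prints 1, matching the asserted last_hidden_state
# shape (batch_size, 1, hidden_sizes[-1]).
print(expected_final_seq_length())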
36
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0

    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
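# A minimal, hypothetical driver for the functions above. The input format — one
# edge per line as "node_a node_b distance", with single-character node names
# (generate_first_solution reads only the first character of the file as the
# start node) — is inferred from how generate_neighbours splits each line. On a
# graph this tiny, a small tabu-list size keeps the neighborhood from being
# exhausted mid-search.
import tempfile

example_edges = """a b 20
a c 18
a d 22
b c 10
b d 11
c d 12
"""

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(example_edges)
    data_path = tmp.name

neighbours = generate_neighbours(data_path)
first_solution, first_distance = generate_first_solution(data_path, neighbours)
# Greedy nearest-neighbour start: a -> c -> b -> d -> a, total distance 61.
best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 5, 2)
print(best_solution, best_cost)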
36
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase : Optional[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") _lowercase : List[Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) _lowercase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" with open(__lowerCamelCase , '''rb''' ) as f: lowerCamelCase__ : Dict =Image.open(__lowerCamelCase ) return im.convert('''RGB''' ) @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = field( default=lowerCAmelCase_ , metadata={ 'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).' } , ) _a = field( default=lowerCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _a = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the training data.'} ) _a = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} ) _a = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) _a = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _a = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } , ) def snake_case ( self : Dict )-> str: if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( '''You must specify either a dataset name from the hub or a train and/or validation directory.''' ) @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = field( default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , ) _a = field( default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , ) _a = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _a = field( default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) _a = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _a = field(default=lowerCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} ) _a = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) _a = field( default=lowerCAmelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def snake_case__ ( __lowerCamelCase : Dict ): """simple docstring""" lowerCamelCase__ : Any =torch.stack([example['''pixel_values'''] for example in examples] ) lowerCamelCase__ : Dict =torch.tensor([example['''labels'''] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def snake_case__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase__ : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_image_classification''' , __lowerCamelCase , __lowerCamelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() lowerCamelCase__ : Union[str, Any] =training_args.get_process_log_level() logger.setLevel(__lowerCamelCase ) transformers.utils.logging.set_verbosity(__lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowerCamelCase__ : Tuple =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase__ : List[Any] =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: lowerCamelCase__ : List[Any] =load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , ) else: lowerCamelCase__ : Dict ={} if data_args.train_dir is not None: lowerCamelCase__ : Optional[Any] =os.path.join(data_args.train_dir , '''**''' ) if data_args.validation_dir is not None: lowerCamelCase__ : Tuple =os.path.join(data_args.validation_dir , '''**''' ) lowerCamelCase__ : List[str] =load_dataset( '''imagefolder''' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir , task='''image-classification''' , ) # If we don't have a validation split, split off a percentage of train as validation. lowerCamelCase__ : List[Any] =None if '''validation''' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowerCamelCase ) and data_args.train_val_split > 0.0: lowerCamelCase__ : Optional[Any] =dataset['''train'''].train_test_split(data_args.train_val_split ) lowerCamelCase__ : int =split['''train'''] lowerCamelCase__ : Dict =split['''test'''] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowerCamelCase__ : Tuple =dataset['''train'''].features['''labels'''].names lowerCamelCase__ , lowerCamelCase__ : Optional[Any] ={}, {} for i, label in enumerate(__lowerCamelCase ): lowerCamelCase__ : int =str(__lowerCamelCase ) lowerCamelCase__ : Tuple =label # Load the accuracy metric from the datasets package lowerCamelCase__ : Dict =evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(__lowerCamelCase : List[Any] ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCamelCase ) , labelaid=__lowerCamelCase , idalabel=__lowerCamelCase , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__ : List[str] =AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) lowerCamelCase__ : Tuple =AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: lowerCamelCase__ : List[str] =image_processor.size['''shortest_edge'''] else: lowerCamelCase__ : str =(image_processor.size['''height'''], image_processor.size['''width''']) lowerCamelCase__ : int =Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) lowerCamelCase__ : Optional[int] =Compose( [ RandomResizedCrop(__lowerCamelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) lowerCamelCase__ : List[str] =Compose( [ Resize(__lowerCamelCase ), CenterCrop(__lowerCamelCase ), ToTensor(), normalize, ] ) def train_transforms(__lowerCamelCase : List[Any] ): lowerCamelCase__ : List[Any] =[ _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image'''] ] return example_batch def val_transforms(__lowerCamelCase : Any ): lowerCamelCase__ : Dict =[_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: lowerCamelCase__ : Dict =( dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowerCamelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: lowerCamelCase__ : str =( dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowerCamelCase ) # Initalize our trainer lowerCamelCase__ : Optional[int] =Trainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , ) # Training if training_args.do_train: lowerCamelCase__ : Union[str, Any] =None if training_args.resume_from_checkpoint is not None: lowerCamelCase__ : int =training_args.resume_from_checkpoint elif last_checkpoint is not 
None: lowerCamelCase__ : List[Any] =last_checkpoint lowerCamelCase__ : List[Any] =trainer.train(resume_from_checkpoint=__lowerCamelCase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowerCamelCase__ : Any =trainer.evaluate() trainer.log_metrics('''eval''' , __lowerCamelCase ) trainer.save_metrics('''eval''' , __lowerCamelCase ) # Write model card and (optionally) push to hub lowerCamelCase__ : List[Any] ={ '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''image-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''image-classification''', '''vision'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCamelCase ) else: trainer.create_model_card(**__lowerCamelCase ) if __name__ == "__main__": main()
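# A hypothetical smoke-test invocation of the script above. The flag names map to
# the ModelArguments/DataTrainingArguments/TrainingArguments fields; the dataset
# ("beans") and the hyperparameter values are illustrative choices, not part of
# the script itself. --remove_unused_columns False keeps the raw "image" column
# so the set_transform callbacks can see it.
import sys

sys.argv = [
    "run_image_classification.py",
    "--dataset_name", "beans",
    "--output_dir", "./beans_outputs/",
    "--remove_unused_columns", "False",
    "--do_train",
    "--do_eval",
    "--num_train_epochs", "5",
    "--per_device_train_batch_size", "8",
    "--seed", "1337",
]
main()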
272
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" if "model" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowerCamelCase__ : List[Any] =orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowerCamelCase__ : List[str] =orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowerCamelCase__ : str =orig_key.split('''.''' )[0].split('''_''' )[-1] lowerCamelCase__ : Dict =orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowerCamelCase__ : str =orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowerCamelCase__ : List[str] =orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowerCamelCase__ : Dict =orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowerCamelCase__ : str =orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowerCamelCase__ : Tuple =orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowerCamelCase__ : Optional[int] ='''yoso.''' + orig_key return orig_key def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Optional[Any] =orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowerCamelCase__ : List[str] =val lowerCamelCase__ : Optional[int] =orig_state_dict['''cls.predictions.decoder.bias'''] lowerCamelCase__ : str =torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowerCamelCase__ : List[Any] =YosoConfig.from_json_file(__lowerCamelCase ) lowerCamelCase__ : List[str] =YosoForMaskedLM(__lowerCamelCase ) lowerCamelCase__ : Tuple =convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowercase : Optional[Any] = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
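# Hypothetical programmatic use of the converter above; the paths are placeholders,
# not real files.
convert_yoso_checkpoint(
    "/path/to/yoso_checkpoint.pt",  # original checkpoint containing a "model_state_dict" entry
    "/path/to/yoso_config.json",    # YosoConfig JSON; must define max_position_embeddings
    "/path/to/output_dir",          # directory that save_pretrained() will write to
)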
272
1