code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
lowercase__ :Tuple = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = set() # keep track of all the paths to be checked lowercase = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowercase = queue.pop(0 ) # get the last node from the path lowercase = path[-1] if node not in explored: lowercase = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowercase = list(lowerCAmelCase__ ) new_path.append(lowerCAmelCase__ ) queue.append(lowerCAmelCase__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowerCAmelCase__ ) # in case there's no path between the 2 nodes return [] def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowercase = [start] lowercase = set(lowerCAmelCase__ ) # Keep tab on distances from `start` node. lowercase = {start: 0, target: -1} while queue: lowercase = queue.pop(0 ) if node == target: lowercase = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowerCAmelCase__ ) queue.append(lowerCAmelCase__ ) lowercase = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
101
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict): # Initialise PyTorch model lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase) print(f'''Building PyTorch model from configuration: {config}''') lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase) # Load weights from tf checkpoint load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''') torch.save(model.state_dict() , _lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
"""simple docstring""" def lowercase ( _snake_case : dict ) ->set: """simple docstring""" __snake_case : int = set() # edges = list of graph's edges __snake_case : int = get_edges(_snake_case ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: __snake_case , __snake_case : str = edges.pop() chosen_vertices.add(_snake_case ) chosen_vertices.add(_snake_case ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(_snake_case ) return chosen_vertices def lowercase ( _snake_case : dict ) ->set: """simple docstring""" __snake_case : Optional[Any] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
102
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False): try: lowercase__ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : int = default else: # KEY is set, convert it to True or False. try: lowercase__ : Optional[int] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True) UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio UpperCamelCase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam UpperCamelCase = pytest.mark.skipif( not config.BEAM_AVAILABLE or 
config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility UpperCamelCase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows UpperCamelCase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase_ ( _lowerCamelCase : int): try: import faiss # noqa except ImportError: lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import regex # noqa except ImportError: lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import elasticsearch # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Union[str, Any]): try: import sqlalchemy # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.TORCH_AVAILABLE: lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not config.TF_AVAILABLE: lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Dict): if not config.JAX_AVAILABLE: lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.PIL_AVAILABLE: lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): try: import transformers # noqa F401 except ImportError: return 
unittest.skip("test requires transformers")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[Any]): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): def _require_spacy_model(_lowerCamelCase : Optional[int]): try: import spacy # noqa F401 spacy.load(_lowerCamelCase) except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) except OSError: return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase) else: return test_case return _require_spacy_model def lowercase_ ( _lowerCamelCase : Dict): try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : List[str]): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): if not _run_slow_tests or _run_slow_tests == 0: lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not _run_local_tests or _run_local_tests == 0: lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): if not _run_packaged_tests or _run_packaged_tests == 0: lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not _run_remote_tests or _run_remote_tests == 0: lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase) return test_case def 
lowercase_ ( *_lowerCamelCase : str): def decorate(cls : str): for name, fn in cls.__dict__.items(): if callable(_lowerCamelCase) and name.startswith("test"): for decorator in decorators: lowercase__ : Optional[int] = decorator(_lowerCamelCase) setattr(cls , _lowerCamelCase , _lowerCamelCase) return cls return decorate class snake_case_ ( __A ): pass class snake_case_ ( __A ): __A : List[Any] = 0 __A : str = 1 __A : int = 2 @contextmanager def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16): lowercase__ : int = requests.Session().request def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str): # Change the url to an invalid url so that the connection hangs lowercase__ : Any = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''') lowercase__ : Dict = timeout try: return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowercase__ : Dict = url lowercase__ : Union[str, Any] = e.args[0] lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),) lowercase__ : int = (max_retry_error,) raise def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple): raise requests.ConnectionError("Offline mode is enabled." 
, request=_lowerCamelCase) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple): lowercase__ : Dict = str(Path().resolve()) with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir: try: os.chdir(_lowerCamelCase) yield finally: os.chdir(_lowerCamelCase) @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]): return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() def lowercase_ ( _lowerCamelCase : str): import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict): try: return func(*_lowerCamelCase , **_lowerCamelCase) except HTTPError as err: if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"): pytest.xfail(str(_lowerCamelCase)) raise err return decorator.decorator(_wrapper , _lowerCamelCase) class snake_case_ : def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]: lowercase__ : Tuple = returncode lowercase__ : int = stdout lowercase__ : Union[str, Any] = stderr async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict): while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : str = [] lowercase__ : List[str] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")), _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True): lowercase__ : Any = asyncio.get_event_loop() lowercase__ : Tuple = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : int = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Any = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''') return result def lowercase_ ( ): lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0") lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
0
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def UpperCamelCase( __UpperCamelCase : Optional[int] ): lowerCAmelCase_ : Union[str, Any] = int(__UpperCamelCase ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = t // 3600, (t // 60) % 60, t % 60 return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}""" def UpperCamelCase( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : int=300 ): # docstyle-ignore return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def UpperCamelCase( __UpperCamelCase : int ): lowerCAmelCase_ : str = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCAmelCase_ : Optional[Any] = f"""{elt:.6f}""" if isinstance(__UpperCamelCase ,__UpperCamelCase ) else str(__UpperCamelCase ) html_code += f""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __snake_case : _a = 5 _a = 0.2 def __init__( self : Optional[Any] , A_ : int , A_ : Optional[str] = None , A_ : bool = True , A_ : Optional["NotebookTrainingTracker"] = None , A_ : int = 3_0_0 , ): lowerCAmelCase_ : Union[str, Any] = total lowerCAmelCase_ : int = '''''' if prefix is None else prefix lowerCAmelCase_ : Optional[Any] = leave lowerCAmelCase_ : Union[str, Any] = parent lowerCAmelCase_ : Any = width lowerCAmelCase_ : Optional[Any] = None lowerCAmelCase_ : Tuple = None lowerCAmelCase_ : str = None def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int , A_ : bool = 
False , A_ : str = None): lowerCAmelCase_ : Dict = value if comment is not None: lowerCAmelCase_ : List[str] = comment if self.last_value is None: lowerCAmelCase_ : List[str] = time.time() lowerCAmelCase_ : Dict = value lowerCAmelCase_ : Union[str, Any] = None lowerCAmelCase_ : str = self.warmup lowerCAmelCase_ : List[Any] = 1 self.update_bar(A_) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total): if self.first_calls > 0: self.first_calls -= 1 lowerCAmelCase_ : Tuple = time.time() lowerCAmelCase_ : List[str] = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: lowerCAmelCase_ : int = self.elapsed_time / (value - self.start_value) else: lowerCAmelCase_ : str = None if value >= self.total: lowerCAmelCase_ : int = self.total lowerCAmelCase_ : List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCAmelCase_ : List[str] = self.average_time_per_item * (self.total - value) self.update_bar(A_) lowerCAmelCase_ : Tuple = value lowerCAmelCase_ : Optional[int] = current_time if self.average_time_per_item is None: lowerCAmelCase_ : List[Any] = 1 else: lowerCAmelCase_ : Any = max(int(self.update_every / self.average_time_per_item) , 1) def UpperCAmelCase__ ( self : int , A_ : Optional[int] , A_ : List[str]=None): lowerCAmelCase_ : str = ''' ''' * (len(str(self.total)) - len(str(A_))) + str(A_) if self.elapsed_time is None: lowerCAmelCase_ : str = F"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: lowerCAmelCase_ : Tuple = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}""" else: lowerCAmelCase_ : Any = ( F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <""" F""" {format_time(self.predicted_remaining)}""" ) self.label += F""", 
{1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment) == 0 else F""", {self.comment}]""" self.display() def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCAmelCase_ : int = disp.display(disp.HTML(self.html_code) , display_id=A_) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase__ ( self : Optional[int]): if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''')) class __snake_case ( UpperCamelCase_ ): def __init__( self : Union[str, Any] , A_ : Optional[int] , A_ : Dict=None): super().__init__(A_) lowerCAmelCase_ : int = None if column_names is None else [column_names] lowerCAmelCase_ : Optional[int] = None def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCAmelCase_ : Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=A_) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase__ ( self : List[str] , A_ : Optional[int]): if self.inner_table is None: lowerCAmelCase_ : List[Any] = [list(values.keys()), list(values.values())] else: lowerCAmelCase_ : List[Any] = self.inner_table[0] if len(self.inner_table) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(A_) lowerCAmelCase_ : str = columns self.inner_table.append([values[c] for c in columns]) def UpperCAmelCase__ ( self : Optional[Any] , A_ : 
Optional[Any] , A_ : Any=None , A_ : Optional[int]=3_0_0): lowerCAmelCase_ : Any = NotebookProgressBar(A_ , prefix=A_ , parent=self , width=A_) return self.child_bar def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Dict = None self.display() class __snake_case ( UpperCamelCase_ ): def __init__( self : Dict): lowerCAmelCase_ : str = None lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : Tuple = False def UpperCAmelCase__ ( self : int , A_ : Optional[Any] , A_ : List[Any] , A_ : Union[str, Any] , **A_ : str): lowerCAmelCase_ : str = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Any = 0 lowerCAmelCase_ : List[str] = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''') lowerCAmelCase_ : str = NotebookTrainingTracker(state.max_steps , A_) def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : List[str] , **A_ : List[Any]): lowerCAmelCase_ : int = int(state.epoch) if int(state.epoch) == state.epoch else F"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) lowerCAmelCase_ : Dict = False def UpperCAmelCase__ ( self : Optional[Any] , A_ : List[str] , A_ : Optional[Any] , A_ : int , A_ : List[str]=None , **A_ : List[str]): if not has_length(A_): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCAmelCase_ : int = self.training_tracker.add_child(len(A_)) else: lowerCAmelCase_ : List[str] = NotebookProgressBar(len(A_)) self.prediction_bar.update(1) else: self.prediction_bar.update(self.prediction_bar.value + 1) def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : Dict , **A_ : Tuple): if self.prediction_bar is not None: self.prediction_bar.close() lowerCAmelCase_ : 
List[Any] = None def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : str , A_ : Any , A_ : Tuple=None , **A_ : str): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCAmelCase_ : List[str] = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy lowerCAmelCase_ : int = state.global_step self.training_tracker.write_line(A_) def UpperCAmelCase__ ( self : int , A_ : Union[str, Any] , A_ : Tuple , A_ : Tuple , A_ : Optional[Any]=None , **A_ : Optional[Any]): if self.training_tracker is not None: lowerCAmelCase_ : Any = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history): if "loss" in log: lowerCAmelCase_ : Dict = log['''loss'''] break if self.first_column == "Epoch": lowerCAmelCase_ : str = int(state.epoch) else: lowerCAmelCase_ : Optional[int] = state.global_step lowerCAmelCase_ : Optional[Any] = '''eval''' for k in metrics: if k.endswith('''_loss'''): lowerCAmelCase_ : str = re.sub(r'''\_loss$''' , '''''' , A_) lowerCAmelCase_ : Any = metrics.pop('''total_flos''' , A_) lowerCAmelCase_ : List[Any] = metrics.pop('''epoch''' , A_) lowerCAmelCase_ : Optional[Any] = metrics.pop(F"""{metric_key_prefix}_runtime""" , A_) lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , A_) lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , A_) lowerCAmelCase_ : Tuple = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , A_) for k, v in metrics.items(): if k == F"""{metric_key_prefix}_loss""": lowerCAmelCase_ : Optional[int] = v else: lowerCAmelCase_ : str = k.split('''_''') lowerCAmelCase_ : List[str] = ''' '''.join([part.capitalize() for part in splits[1:]]) lowerCAmelCase_ : List[Any] = v self.training_tracker.write_line(A_) self.training_tracker.remove_child() lowerCAmelCase_ : Any = None # Evaluation 
takes a long time so we should force the next update. lowerCAmelCase_ : str = True def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Any , **A_ : int): self.training_tracker.update( state.global_step , comment=F"""Epoch {int(state.epoch)}/{state.num_train_epochs}""" , force_update=A_) lowerCAmelCase_ : str = None
103
"""Convert an original CvT checkpoint into the Hugging Face Transformers format.

The original checkpoint names weights ``stage{i}.…``; this script builds the
(HF key, original key) rename table, copies the tensors across, and saves the
resulting ``CvtForImageClassification`` model plus its image processor.

Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
"""
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Return (HF key, original key) pairs for the patch embedding of stage ``idx``."""
    embed = []
    # projection -> conv, normalization -> layer norm; each has weight + bias.
    for hf_part, orig_part in (("projection", "proj"), ("normalization", "norm")):
        for param in ("weight", "bias"):
            embed.append(
                (
                    f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_part}.{param}",
                    f"stage{idx}.patch_embed.{orig_part}.{param}",
                )
            )
    return embed


def attention(idx, cnt):
    """Return (HF key, original key) pairs for block ``cnt`` of stage ``idx``.

    Covers the q/k/v convolutional projections (conv + batch-norm buffers),
    the linear q/k/v projections, the attention output projection, the MLP
    and both layer norms.
    """
    attention_weights = []
    hf_attn = f"cvt.encoder.stages.{idx}.layers.{cnt}.attention"
    orig_attn = f"stage{idx}.blocks.{cnt}.attn"

    # Convolutional projection for query/key/value: one conv weight plus the
    # full set of batch-norm parameters and buffers.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_attn}.attention.convolution_projection_{name}.convolution_projection.convolution.weight",
                f"{orig_attn}.conv_proj_{short}.conv.weight",
            )
        )
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_attn}.attention.convolution_projection_{name}.convolution_projection.normalization.{param}",
                    f"{orig_attn}.conv_proj_{short}.bn.{param}",
                )
            )

    # Linear projections for query/key/value.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_attn}.attention.projection_{name}.{param}", f"{orig_attn}.proj_{short}.{param}")
            )

    # Attention output projection.
    for param in ("weight", "bias"):
        attention_weights.append((f"{hf_attn}.output.dense.{param}", f"{orig_attn}.proj.{param}"))

    # MLP and layer norms.
    hf_layer = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_block = f"stage{idx}.blocks.{cnt}"
    for hf_part, orig_part in (
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_layer}.{hf_part}.{param}", f"{orig_block}.{orig_part}.{param}"))

    return attention_weights


def cls_token(idx):
    """Return the cls-token rename pair for stage ``idx``.

    NOTE(review): the original checkpoint stores the cls token under
    ``stage2.cls_token`` regardless of ``idx``; that literal is kept as-is.
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Return the rename pairs for the final layer norm and classifier head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert ``cvt_file_name`` (original CvT weights) and save the HF model.

    Args:
        cvt_model: name of the variant (e.g. ``cvt-13``, ``cvt-21``, ``cvt-w24``);
            the two characters after ``cvt-`` select the depth configuration.
        image_size: input image size for the saved image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder: output directory for model and image processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the obfuscated original only showed a bare assignment of
    # image_size; setting the shortest edge matches upstream usage — confirm.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    # Copy each tensor from the original checkpoint under its HF name.
    for hf_key, orig_key in list_of_state_dict:
        huggingface_weights[hf_key] = original_weights[orig_key]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
"""Image-captioning tool built on BLIP (Salesforce/blip-image-captioning-base)."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class lowercase_(PipelineTool):
    """Tool that generates an English description of an input image.

    The obfuscated original bound every class attribute and method to a single
    shared name, so only the last of each survived, and inherited from an
    undefined base; restored to the ``PipelineTool`` contract
    (``encode`` -> ``forward`` -> ``decode``).
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The BLIP image processor needs the vision extras (PIL) installed.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run generation on the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated token ids into a single caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
104
"""Lazy import structure for the ELECTRA model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure; optional-backend entries are added below only when the
# corresponding backend is installed. The obfuscated original rebound a
# throwaway name per branch, so _import_structure was never populated (and
# was undefined at the _LazyModule call).
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from datetime import datetime import requests def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes: '''simple docstring''' a : Dict = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" a : Dict = requests.get(base_url + url ).json()[0]["urls"][0]["src"] return requests.get(_lowercase ).content if __name__ == "__main__": a : str = input('''Enter Video/IGTV url: ''').strip() a : Any = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, '''wb''') as fp: fp.write(download_video(url)) print(F'''Done. Video saved to disk as {file_name}.''')
105
"""Tokenization tests for LED (slow and fast tokenizers)."""
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_(TokenizerTesterMixin, unittest.TestCase):
    """LED tokenizer test-suite.

    The obfuscated original collapsed every class attribute and method onto a
    single shared name (so only one survived and test discovery was broken)
    and inherited from an undefined base; restored to the names the
    ``TokenizerTesterMixin`` contract requires.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Tiny BPE vocab/merges written to the temp dir for the local tokenizer.
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"],
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            self.assertIsInstance(batch, BatchEncoding)
            # LED pads to a multiple of the attention window, hence 5122.
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            # NOTE(review): the obfuscated source hid the padding argument here;
            # padding=False matches the shorter second row being padded by pad().
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Not applicable to LED; intentionally skipped.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Dict = { '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''', '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''', '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''', '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''', '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''', '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''', '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''', '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''', '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''', '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''', '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''', '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "codegen" lowercase__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", 
"num_hidden_layers": "n_layer", } def __init__( self : Tuple ,lowercase_ : Tuple=5_0_4_0_0 ,lowercase_ : Any=2_0_4_8 ,lowercase_ : Union[str, Any]=2_0_4_8 ,lowercase_ : List[str]=4_0_9_6 ,lowercase_ : Dict=2_8 ,lowercase_ : List[Any]=1_6 ,lowercase_ : Tuple=6_4 ,lowercase_ : str=None ,lowercase_ : List[str]="gelu_new" ,lowercase_ : int=0.0 ,lowercase_ : Optional[Any]=0.0 ,lowercase_ : Union[str, Any]=0.0 ,lowercase_ : Tuple=1E-5 ,lowercase_ : Union[str, Any]=0.02 ,lowercase_ : str=True ,lowercase_ : Any=5_0_2_5_6 ,lowercase_ : List[str]=5_0_2_5_6 ,lowercase_ : List[str]=False ,**lowercase_ : Tuple ,): lowerCAmelCase__ : str = vocab_size lowerCAmelCase__ : Union[str, Any] = n_ctx lowerCAmelCase__ : Optional[int] = n_positions lowerCAmelCase__ : Dict = n_embd lowerCAmelCase__ : Any = n_layer lowerCAmelCase__ : Union[str, Any] = n_head lowerCAmelCase__ : List[str] = n_inner lowerCAmelCase__ : Optional[Any] = rotary_dim lowerCAmelCase__ : Union[str, Any] = activation_function lowerCAmelCase__ : Optional[int] = resid_pdrop lowerCAmelCase__ : Optional[Any] = embd_pdrop lowerCAmelCase__ : List[str] = attn_pdrop lowerCAmelCase__ : Union[str, Any] = layer_norm_epsilon lowerCAmelCase__ : Optional[int] = initializer_range lowerCAmelCase__ : Tuple = use_cache lowerCAmelCase__ : List[str] = bos_token_id lowerCAmelCase__ : Optional[Any] = eos_token_id super().__init__( bos_token_id=lowercase_ ,eos_token_id=lowercase_ ,tie_word_embeddings=lowercase_ ,**lowercase_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Optional[int] ,lowercase_ : PretrainedConfig ,lowercase_ : str = "default" ,lowercase_ : List[PatchingSpec] = None ,lowercase_ : bool = False ,): super().__init__(lowercase_ ,task=lowercase_ ,patching_specs=lowercase_ ,use_past=lowercase_ ) if not getattr(self._config ,'''pad_token_id''' ,lowercase_ ): # TODO: how to do that better? 
lowerCAmelCase__ : List[Any] = 0 @property def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(lowercase_ ,direction='''inputs''' ) lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self : int ): return self._config.n_layer @property def __lowerCAmelCase ( self : List[Any] ): return self._config.n_head def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : PreTrainedTokenizer ,lowercase_ : int = -1 ,lowercase_ : int = -1 ,lowercase_ : bool = False ,lowercase_ : Optional[TensorType] = None ,): lowerCAmelCase__ : str = super(lowercase_ ,self ).generate_dummy_inputs( lowercase_ ,batch_size=lowercase_ ,seq_length=lowercase_ ,is_pair=lowercase_ ,framework=lowercase_ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase__ : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : Optional[Any] = seqlen + 2 lowerCAmelCase__ : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase__ : Dict = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers ) ] lowerCAmelCase__ : Any = common_inputs['''attention_mask'''] if self.use_past: lowerCAmelCase__ : List[str] = ordered_inputs['''attention_mask'''].dtype lowerCAmelCase__ : Dict = torch.cat( 
[ordered_inputs['''attention_mask'''], torch.ones(lowercase_ ,lowercase_ ,dtype=lowercase_ )] ,dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self : Any ): return 1_3
106
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
0
def __magic_name__ ( A : str, A : int ): '''simple docstring''' a = [[] for _ in range(A )] a = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1 or len(A ) <= key: return input_string for position, character in enumerate(A ): a = position % (lowest * 2) # puts it in bounds a = min(A, lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(A ) a = ["".join(A ) for row in temp_grid] a = "".join(A ) return output_string def __magic_name__ ( A : str, A : int ): '''simple docstring''' a = [] a = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1: return input_string a = [[] for _ in range(A )] # generates template for position in range(len(A ) ): a = position % (lowest * 2) # puts it in bounds a = min(A, lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("*" ) a = 0 for row in temp_grid: # fills in the characters a = input_string[counter : counter + len(A )] grid.append(list(A ) ) counter += len(A ) a = "" # reads as zigzag for position in range(len(A ) ): a = position % (lowest * 2) # puts it in bounds a = min(A, lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def __magic_name__ ( A : str ): '''simple docstring''' a = {} for key_guess in range(1, len(A ) ): # tries every key a = decrypt(A, A ) return results if __name__ == "__main__": import doctest doctest.testmod()
107
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class snake_case_ ( unittest.TestCase ): @require_torch def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Union[str, Any] = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) lowercase__ : List[str] = load_dataset("ashraq/esc50" ) lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"] lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : str ) -> Optional[int]: pass @slow @require_torch def __UpperCamelCase ( self : List[str] ) -> int: lowercase__ : Tuple = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" ) lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"] lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ] , ) lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) lowercase__ : Tuple = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) 
self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: pass
87
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Swinv2ForImageClassification''', '''Swinv2ForMaskedImageModeling''', '''Swinv2Model''', '''Swinv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
108
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
0
"""simple docstring""" from __future__ import annotations def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Any ): # noqa: E741 while r - l > 1: UpperCAmelCase : int = (l + r) // 2 if v[m] >= key: UpperCAmelCase : Union[str, Any] = m else: UpperCAmelCase : Dict = m # noqa: E741 return r def _snake_case ( UpperCamelCase : list[int] ): if len(UpperCamelCase ) == 0: return 0 UpperCAmelCase : Union[str, Any] = [0] * len(UpperCamelCase ) UpperCAmelCase : Union[str, Any] = 1 UpperCAmelCase : Optional[Any] = v[0] for i in range(1 , len(UpperCamelCase ) ): if v[i] < tail[0]: UpperCAmelCase : List[str] = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase : Dict = v[i] length += 1 else: UpperCAmelCase : Optional[Any] = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
109
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = R''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class snake_case_ ( __A ): def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]: lowercase__ : str = max_length lowercase__ : Optional[int] = max_position_embeddings @add_start_docstrings(lowercase_ ) def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: lowercase__ : str = input_ids.shape[-1] lowercase__ : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). 
Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , ) lowercase__ : Optional[int] = start_length lowercase__ : str = max_new_tokens lowercase__ : Tuple = start_length + max_new_tokens @add_start_docstrings(lowercase_ ) def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool: return input_ids.shape[-1] >= self.max_length class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict: lowercase__ : List[str] = max_time lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(lowercase_ ) def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: return time.time() - self.initial_timestamp > self.max_time class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: return any(criteria(lowercase_ , lowercase_ ) for criteria in self ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: for stopping_criterium in self: if isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length elif isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length return None def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int): lowercase__ : Optional[int] = stopping_criteria.max_length lowercase__ : str = deepcopy(_lowerCamelCase) if 
stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase)) return new_stopping_criteria
87
0
import functools from typing import Any def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not all( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie lowercase__ = {} lowercase__ = '''WORD_KEEPER''' for word in words: lowercase__ = trie for c in word: if c not in trie_node: lowercase__ = {} lowercase__ = trie_node[c] lowercase__ = True lowercase__ = len(SCREAMING_SNAKE_CASE ) # Dynamic programming method @functools.cache def is_breakable(SCREAMING_SNAKE_CASE ) -> bool: if index == len_string: return True lowercase__ = trie for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = trie_node.get(string[i] , SCREAMING_SNAKE_CASE ) if trie_node is None: return False if trie_node.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
110
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Any = [] lowercase__ : Optional[int] = [] lowercase__ : Tuple = [] for rt in rc.restypes: lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) lowercase__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = restype_atomaa_mask[protein_aatype] lowercase__ : List[Any] = residx_atomaa_mask lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device) for restype, restype_letter in enumerate(rc.restypes): lowercase__ : Tuple = rc.restype_atoa[restype_letter] lowercase__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase__ : Optional[int] = rc.atom_order[atom_name] lowercase__ : Tuple = 1 lowercase__ : Dict = restype_atomaa_mask[protein_aatype] lowercase__ : Any = residx_atomaa_mask return protein def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray) lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
87
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ , A_ = False ): if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( '''Warning: upper bound of deterministic test is exceeded. ''' '''Pass allow_probable=True to allow probabilistic test. ''' '''A return value of True indicates a probable prime.''' ) # array bounds provided by analysis lowerCAmelCase__ : Optional[int] = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] lowerCAmelCase__ : int = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(_lowerCamelCase , 1 ): if n < _p: # then we have our last prime to check lowerCAmelCase__ : List[str] = primes[:idx] break lowerCAmelCase__ : Optional[Any] = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: lowerCAmelCase__ : str = False for r in range(_lowerCamelCase ): lowerCAmelCase__ : int = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): lowerCAmelCase__ : Tuple = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def __SCREAMING_SNAKE_CASE ( ): assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert 
miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
106
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class snake_case_ ( unittest.TestCase ): def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]: lowercase__ : Dict = parent lowercase__ : Dict = batch_size lowercase__ : Tuple = seq_length lowercase__ : Dict = is_training lowercase__ : Dict = use_attention_mask lowercase__ : Tuple = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : int = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Union[str, Any] 
= type_vocab_size lowercase__ : Dict = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : List[str] = num_choices lowercase__ : str = rescale_embeddings lowercase__ : Optional[Any] = attention_type lowercase__ : Optional[int] = use_bias lowercase__ : Optional[int] = block_size lowercase__ : str = num_random_blocks def __UpperCamelCase ( self : str ) -> Optional[Any]: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[int] = None if self.use_token_type_ids: lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __UpperCamelCase ( self : Union[str, Any] ) -> int: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class snake_case_ ( __A ,unittest.TestCase ): __A : Optional[int] = ( ( FlaxBigBirdForCausalLM, 
FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __A : List[str] = False __A : Any = False def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Optional[int] ) -> Dict: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : List[str] ) -> Any: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Tuple ) -> str: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: super().test_hidden_states_output() @slow def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[int]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : str ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase__ : Optional[Any] = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ 
: Tuple , lowercase_ : int=None , **lowercase_ : Dict ): return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ ) with self.subTest("JIT Enabled" ): lowercase__ : int = model_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Import structure for the lazily-loaded BridgeTower submodules.
# Configs and the processor have no optional dependencies, so they are always listed.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

# The image processor needs vision extras (PIL); register it only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

# Modeling code needs torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure` above.
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
305
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Import structure for the lazily-loaded GroupViT submodules.
# Configuration classes have no optional dependencies and are always listed.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch modeling code; registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow modeling code; registered only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure` above.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def lowerCAmelCase_ ( _lowercase : np.ndarray) -> Tuple: """simple docstring""" return input_array.reshape((input_array.size, 1)) def lowerCAmelCase_ ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : int) -> List[str]: """simple docstring""" a__ : int = np.nan for i in range(_lowerCamelCase): a__ : List[Any] = features[:, labels == i] a__ : Any = data.mean(1) # Centralize the data of class i a__ : List[Any] = data - column_reshape(_lowerCamelCase) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_lowerCamelCase , centered_data.T) else: # If covariance_sum is np.nan (i.e. first loop) a__ : str = np.dot(_lowerCamelCase , centered_data.T) return covariance_sum / features.shape[1] def lowerCAmelCase_ ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : int) -> Optional[int]: """simple docstring""" a__ : str = features.mean(1) a__ : Any = np.nan for i in range(_lowerCamelCase): a__ : List[Any] = features[:, labels == i] a__ : Tuple = data.shape[1] a__ : Tuple = data.mean(1) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase) , (column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase)).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) a__ : str = device_data * np.dot( column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase) , (column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase)).T , ) return covariance_sum / features.shape[1] def lowerCAmelCase_ ( _lowercase : np.ndarray , _lowercase : int) -> str: """simple docstring""" # Check if the features have been loaded if features.any(): a__ : List[Any] = features.mean(1) # Center the dataset a__ : Any = features - np.reshape(_lowerCamelCase , (data_mean.size, 1)) a__ : Union[str, Any] = np.dot(_lowerCamelCase , centered_data.T) / features.shape[1] a__ : Optional[Any] = np.linalg.eigh(_lowerCamelCase) # Take all the columns in the reverse order (-1), and then takes only the first a__ : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space a__ : Dict = np.dot(filtered_eigenvectors.T , _lowerCamelCase) logging.info("""Principal Component Analysis computed""") return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_lowerCamelCase) logging.error("""Dataset empty""") raise AssertionError def lowerCAmelCase_ ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : int , _lowercase : int) -> Optional[int]: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: a__ : int = eigh( covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) , ) a__ : List[Any] = eigenvectors[:, ::-1][:, :dimensions] a__ : Dict = np.linalg.svd(_lowerCamelCase) a__ : Any = svd_matrix[:, 0:dimensions] a__ : Optional[int] = np.dot(filtered_svd_matrix.T , _lowerCamelCase) logging.info("""Linear Discriminant Analysis computed""") return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_lowerCamelCase) logging.error("""Dataset empty""") raise 
AssertionError def lowerCAmelCase_ ( ) -> Any: """simple docstring""" # Create dummy dataset with 2 classes and 3 features a__ : Any = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]) a__ : Tuple = np.array([0, 0, 0, 1, 1]) a__ : Tuple = 2 a__ : Optional[int] = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_lowerCamelCase) as error_info: a__ : Tuple = linear_discriminant_analysis( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) if isinstance(_lowerCamelCase , np.ndarray): raise AssertionError( """Did not raise AssertionError for dimensions > classes""") assert error_info.type is AssertionError def lowerCAmelCase_ ( ) -> List[Any]: """simple docstring""" a__ : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) a__ : Dict = 2 a__ : List[str] = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]]) with pytest.raises(_lowerCamelCase) as error_info: a__ : Dict = principal_component_analysis(_lowerCamelCase , _lowerCamelCase) if not np.allclose(_lowerCamelCase , _lowerCamelCase): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
170
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row jsonl fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # Arrow memory should grow only when the dataset is loaded in memory.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row jsonl fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole buffer as one JSON document."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse a buffer as JSON Lines: one document per line."""
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        # Round-trip through fsspec with inferred compression and compare to the fixture file.
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
87
0
'''simple docstring''' def _UpperCamelCase ( __A ) -> int: '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" UpperCamelCase__ = False if num < 0: UpperCamelCase__ = True UpperCamelCase__ = -num UpperCamelCase__ = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(_lowerCamelCase ) for e in binary ) return "0b" + "".join(str(_lowerCamelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
80
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case_(ProcessorMixin):
    """Processor that bundles a LayoutLMv3 image processor and tokenizer behind one `__call__`."""

    # Names required by ProcessorMixin: which components this processor wraps
    # and which classes may instantiate them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was the pre-v5 name of `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images (and optionally text/boxes/labels) for the model.

        When the image processor runs OCR itself, callers must not supply
        their own boxes or word labels.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}'''
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class A(PretrainedConfig):
    """Configuration for a CvT model.

    Most arguments are per-stage lists (the model has three stages); scalar
    arguments apply to the whole model.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
199
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline (each step optional): resize the shortest edge, center crop,
    rescale pixel values, normalize, and convert RGBA/other modes to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # `size` describes the shortest edge, so it must not default to a square
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a batch; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
87
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets __A : Tuple = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' __A : Dict = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' __A : Optional[Any] = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). 
Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] ): return float((preds == labels).mean() ) def lowercase ( __snake_case : Dict , __snake_case : str ): lowercase_ : int = simple_accuracy(_lowerCamelCase , _lowerCamelCase ) lowercase_ : Optional[Any] = float(fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase ) ) return { "accuracy": acc, "f1": fa, } def lowercase ( __snake_case : Optional[Any] , __snake_case : Any ): lowercase_ : int = np.array(_lowerCamelCase ) lowercase_ : List[Any] = np.array(_lowerCamelCase ) lowercase_ : Dict = en_sentvecs.shape[0] # mean centering lowercase_ : int = en_sentvecs - np.mean(_lowerCamelCase , axis=0 ) lowercase_ : List[Any] = in_sentvecs - np.mean(_lowerCamelCase , axis=0 ) lowercase_ : Union[str, Any] = cdist(_lowerCamelCase , _lowerCamelCase , '''cosine''' ) lowercase_ : str = np.array(range(_lowerCamelCase ) ) lowercase_ : Optional[int] = 
sim.argsort(axis=1 )[:, :1_0] lowercase_ : Any = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def A ( self : Any ) -> Tuple: if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", ''' '''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", ''' '''\"wiki-ner\"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def A ( self : Dict , A : List[Any] , A : Tuple ) -> List[Any]: if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(lowercase_ , lowercase_ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(lowercase_ , lowercase_ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", ''' '''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", ''' '''\"wiki-ner\"]''' )
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A__: Any = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: int = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Tuple = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys A__: Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
276
UpperCamelCase = [0, 2, 4, 6, 8] UpperCamelCase = [1, 3, 5, 7, 9] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase__ : str = 0 for digit in range(10): lowercase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase) return result lowercase__ : Dict = 0 for digita in range(10): lowercase__ : int = digita if (remainder + digita) % 2 == 0: lowercase__ : Optional[Any] = ODD_DIGITS else: lowercase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowercase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def lowercase_ ( _lowerCamelCase : int = 9): lowercase__ : Tuple = 0 for length in range(1 , max_power + 1): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase) return result if __name__ == "__main__": print(f"{solution() = }")
87
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase_ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
309
"""TER (Translation Edit Rate) metric, backed by sacrebleu's TER implementation."""

import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose [pred][ref] -> [ref][pred], the layout sacrebleu expects.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
0
a__: Tuple = [ (1_000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'), ] def UpperCamelCase__( UpperCamelCase__ : str )->Any: A__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00} A__ = 0 A__ = 0 while place < len(_lowerCamelCase ): if (place + 1 < len(_lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def UpperCamelCase__( UpperCamelCase__ : int )->List[Any]: A__ = [] for arabic, roman in ROMAN: (A__) = divmod(_lowerCamelCase , _lowerCamelCase ) result.append(roman * factor ) if number == 0: break return "".join(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
193
def lowercase_ ( _lowerCamelCase : int): lowercase__ : Dict = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
87
0
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Serving deps missing: provide inert stand-ins so the module still imports;
    # instantiating ServeCommand will raise a clear RuntimeError instead.
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory used by the CLI to instantiate a ServeCommand from parsed arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `serve` subcommand and its arguments on the CLI parser."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        """Start the uvicorn server (blocking)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        """GET /: return the loaded model's configuration as a dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """POST /tokenize: tokenize the input text, optionally also returning token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """POST /detokenize: decode token ids back into a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """POST /forward: run the pipeline on the provided inputs."""
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
182
from PIL import Image def lowercase_ ( _lowerCamelCase : Image , _lowerCamelCase : int): lowercase__ : List[str] = (259 * (level + 255)) / (255 * (259 - level)) def contrast(_lowerCamelCase : int) -> int: return int(128 + factor * (c - 128)) return img.point(_lowerCamelCase) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 UpperCamelCase = change_contrast(img, 170) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
87
0
from math import factorial, radians def _UpperCamelCase ( lowercase__ , lowercase__ = 18 , lowercase__ = 10 ): __SCREAMING_SNAKE_CASE : Tuple = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians __SCREAMING_SNAKE_CASE : Union[str, Any] = radians(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Any = angle_in_radians __SCREAMING_SNAKE_CASE : List[str] = 3 __SCREAMING_SNAKE_CASE : List[str] = -1 for _ in range(_lowerCamelCase ): result += (b * (angle_in_radians**a)) / factorial(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : List[Any] = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": __import__('doctest').testmod()
9
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache backed by a deque plus a membership set.

    The left end of ``dq_store`` is the most recently used key.  Fixes over
    the obfuscated original: the class was renamed ``snake_case_`` while its
    own body and the demo referenced ``LRUCache``; ``__init__`` assigned
    throwaway locals instead of the instance attributes; the ``refer`` and
    ``display`` methods had been renamed away from the names used by callers.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create a cache that keeps at most ``n`` keys (0 -> sys.maxsize)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to ``x``, evicting the LRU key when full."""
        if x not in self.key_reference:
            # New key: make room by dropping the least recently used entry.
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Known key: remove it so the appendleft below moves it to front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
0
# NOTE(review): this chunk is a collapsed, machine-obfuscated copy of the
# HuggingFace `transformers` TF-OPT test module (model tester, resize-embeddings
# test, and slow integration/generation tests). The identifier obfuscation is
# inconsistent — e.g. the inputs helper is defined as `__SCREAMING_SNAKE_CASE`
# but called as `prepare_opt_inputs_dict`, and setUp assigns locals
# (`lowerCAmelCase__`) where instance attributes are presumably intended — so
# this text does not run as-is. It appears to be dataset material rather than
# live code; kept byte-identical below. TODO confirm against the upstream
# transformers test file before attempting to execute or repair.
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_=None , A_=None ): if attention_mask is None: lowerCAmelCase__ : List[str] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = OPTConfig lowercase__ = {} lowercase__ = "gelu" def __init__( self : Any ,lowercase_ : Optional[int] ,lowercase_ : Optional[Any]=1_3 ,lowercase_ : List[str]=7 ,lowercase_ : Tuple=True ,lowercase_ : Dict=False ,lowercase_ : Dict=9_9 ,lowercase_ : int=1_6 ,lowercase_ : Any=2 ,lowercase_ : int=4 ,lowercase_ : Optional[int]=4 ,lowercase_ : Optional[Any]="gelu" ,lowercase_ : List[str]=0.1 ,lowercase_ : Tuple=0.1 ,lowercase_ : Tuple=2_0 ,lowercase_ : List[str]=2 ,lowercase_ : Union[str, Any]=1 ,lowercase_ : Optional[Any]=0 ,lowercase_ : Union[str, Any]=1_6 ,lowercase_ : Dict=1_6 ,): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[Any] = batch_size lowerCAmelCase__ : Dict = seq_length lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Union[str, Any] = use_labels lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : Dict = hidden_size lowerCAmelCase__ : Dict = num_hidden_layers lowerCAmelCase__ : Union[str, Any] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : List[str] = hidden_dropout_prob lowerCAmelCase__ : 
int = attention_probs_dropout_prob lowerCAmelCase__ : Dict = max_position_embeddings lowerCAmelCase__ : Optional[int] = eos_token_id lowerCAmelCase__ : Optional[int] = pad_token_id lowerCAmelCase__ : Dict = bos_token_id lowerCAmelCase__ : List[str] = embed_dim lowerCAmelCase__ : List[str] = word_embed_proj_dim lowerCAmelCase__ : List[str] = False def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) lowerCAmelCase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) lowerCAmelCase__ : int = tf.concat([input_ids, eos_tensor] ,axis=1 ) lowerCAmelCase__ : Any = self.config_cls( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=lowercase_ ,**self.config_updates ,) lowerCAmelCase__ : Union[str, Any] = prepare_opt_inputs_dict(lowercase_ ,lowercase_ ) return config, inputs_dict def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : int ,lowercase_ : Optional[Any] ): lowerCAmelCase__ : Tuple = TFOPTModel(config=lowercase_ ) lowerCAmelCase__ : Optional[Any] = inputs_dict["input_ids"] lowerCAmelCase__ : Optional[int] = input_ids[:1, :] lowerCAmelCase__ : int = inputs_dict["attention_mask"][:1, :] lowerCAmelCase__ : List[Any] = 1 # first forward pass lowerCAmelCase__ : Any = model(lowercase_ ,attention_mask=lowercase_ ,use_cache=lowercase_ ) lowerCAmelCase__ : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase__ : Dict = ids_tensor((self.batch_size, 
3) ,config.vocab_size ) lowerCAmelCase__ : str = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and lowerCAmelCase__ : Tuple = tf.concat([input_ids, next_tokens] ,axis=-1 ) lowerCAmelCase__ : Dict = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) lowerCAmelCase__ : Union[str, Any] = model(lowercase_ ,attention_mask=lowercase_ )[0] lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,attention_mask=lowercase_ ,past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice lowerCAmelCase__ : Any = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) lowerCAmelCase__ : Dict = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ ,lowercase_ ,rtol=1E-3 ) @require_tf class SCREAMING_SNAKE_CASE ( __A , __A , unittest.TestCase ): """simple docstring""" lowercase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowercase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowercase__ = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = 10 def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : List[str] = TFOPTModelTester(self ) lowerCAmelCase__ : Tuple = ConfigTester(self ,config_class=lowercase_ ) def __lowerCAmelCase ( self : Tuple ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase_ : Tuple ,lowercase_ : str ): if hasattr(lowercase_ 
,'''weight''' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase_ ,'''weight''' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings lowerCAmelCase__ : List[str] = model_class(config=lowercase_ ) lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() ) lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase_ ) lowerCAmelCase__ : Optional[Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() ) lowerCAmelCase__ : Tuple = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. lowerCAmelCase__ : Dict = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] ,lowercase_ ) # check that weights remain the same after resizing lowerCAmelCase__ : List[Any] = True for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase__ : List[Any] = False self.assertTrue(lowercase_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] ,lowercase_ ) lowerCAmelCase__ : Dict = True for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase__ : List[str] = False self.assertTrue(lowercase_ ) def __SCREAMING_SNAKE_CASE ( A_ ): return tf.constant(_lowerCamelCase , dtype=tf.intaa ) @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" lowercase__ = 99 def __lowerCAmelCase 
( self : str ): lowerCAmelCase__ : Any = tf.ones((4, 1) ,dtype=tf.intaa ) * 2 lowerCAmelCase__ : Optional[Any] = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 ) lowerCAmelCase__ : int = input_ids.shape[0] lowerCAmelCase__ : Any = OPTConfig( vocab_size=self.vocab_size ,hidden_size=2_4 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=3_2 ,max_position_embeddings=4_8 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,) return config, input_ids, batch_size @require_sentencepiece @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : Dict = TFOPTModel.from_pretrained('''facebook/opt-350m''' ) lowerCAmelCase__ : Dict = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase__ : Tuple = tf.not_equal(lowercase_ ,model.config.pad_token_id ) with tf.GradientTape(): lowerCAmelCase__ : Union[str, Any] = model(input_ids=lowercase_ ,attention_mask=lowercase_ ).last_hidden_state lowerCAmelCase__ : str = (1, 1_1, 5_1_2) self.assertEqual(output.shape ,lowercase_ ) lowerCAmelCase__ : str = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-3 ) ) lowerCAmelCase__ : Union[str, Any] = tf.function(lowercase_ ,jit_compile=lowercase_ ) lowerCAmelCase__ : List[str] = xla_generate(lowercase_ ,lowercase_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-2 ) ) @require_tf @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Dict ): super().setUp() lowerCAmelCase__ : str = "facebook/opt-350m" def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Dict = TFOPTForCausalLM.from_pretrained(self.path_model ) lowerCAmelCase__ : Any = GPTaTokenizer.from_pretrained(self.path_model ) lowerCAmelCase__ : List[str] 
= [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False lowerCAmelCase__ : Tuple = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ ,add_special_tokens=lowercase_ ) lowerCAmelCase__ : List[str] = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 ) lowerCAmelCase__ : Optional[Any] = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) ) lowerCAmelCase__ : Dict = tf.function(lowercase_ ,jit_compile=lowercase_ ) lowerCAmelCase__ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 ) self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) ) @require_tf @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @property def __lowerCAmelCase ( self : int ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : List[Any] = "facebook/opt-125m" lowerCAmelCase__ : Optional[Any] = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowerCAmelCase__ : List[Any] = [] lowerCAmelCase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase__ : List[Any] = 
TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase__ : Tuple = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids lowerCAmelCase__ : Any = model.generate(lowercase_ ,max_length=1_0 ) lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ ,lowercase_ ) def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : Dict = "facebook/opt-350m" lowerCAmelCase__ : Optional[int] = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ ) lowerCAmelCase__ : Tuple = "left" # use different length sentences to test batching lowerCAmelCase__ : str = [ "Hello, my dog is a little", "Today, I", ] lowerCAmelCase__ : List[str] = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ ) lowerCAmelCase__ : Dict = inputs["input_ids"] lowerCAmelCase__ : int = model.generate(input_ids=lowercase_ ,attention_mask=inputs['''attention_mask'''] ) lowerCAmelCase__ : Tuple = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids lowerCAmelCase__ : Dict = model.generate(input_ids=lowercase_ ) lowerCAmelCase__ : Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['''attention_mask'''][-1] ,tf.intaa ) ) lowerCAmelCase__ : Optional[Any] = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids lowerCAmelCase__ : List[str] = model.generate(input_ids=lowercase_ ,max_length=model.config.max_length - num_paddings ) lowerCAmelCase__ : List[str] = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ ) lowerCAmelCase__ : Tuple = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowercase_ ) lowerCAmelCase__ : Tuple = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowercase_ ) lowerCAmelCase__ : List[Any] = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a 
conversation with a friend about the", ] self.assertListEqual(lowercase_ ,lowercase_ ) self.assertListEqual(lowercase_ ,[non_padded_sentence, padded_sentence] ) def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : Optional[int] = "facebook/opt-350m" lowerCAmelCase__ : Union[str, Any] = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowerCAmelCase__ : Optional[int] = [] lowerCAmelCase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase__ : List[str] = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids lowerCAmelCase__ : Optional[Any] = model.generate(lowercase_ ,max_length=1_0 ) lowerCAmelCase__ : int = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ ,lowercase_ )
106
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original bound both the logger and this map to
# the same name (`UpperCamelCase`), so the map shadowed the logger; distinct
# names are restored here.
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for a ConvBERT model.

    Defaults mirror the YituTech/conv-bert-base checkpoint.  The obfuscated
    original named both classes ``snake_case_`` (the second shadowed the
    first) and inherited from an undefined ``__A``; the real base classes and
    names are restored so the module is importable.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra `choice` axis between batch
        # and sequence; all three model inputs share the same dynamic axes.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
87
0
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator: X_{n+1} = (a * X_n + c) mod m.

    Fixes over the obfuscated original: the four constructor parameters all
    shared one name (a SyntaxError), the constructor assigned throwaway
    locals instead of instance attributes, and ``next_number`` computed the
    next value without storing it (always returning the initial seed).
    """

    def __init__(
        self,
        multiplier: int,
        increment: int,
        modulo: int,
        seed: int = int(time()),  # noqa: B008 -- default captured once, at definition time
    ) -> None:
        """Save the generator parameters; ``modulo`` bounds the output (exclusive)."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self) -> int:
        """Advance the generator and return the new value in [0, modulo)."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action (Numerical Recipes constants, modulus 2**32).
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
305
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def lowercase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` dump.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param bert_config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where to write the converted ``state_dict``

    Fixes over the obfuscated original: the three parameters all shared one
    name (a SyntaxError) and the ``__main__`` block called an undefined
    ``convert_tf_checkpoint_to_pytorch``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowercase_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def lowerCAmelCase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` dump.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param bert_config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where to write the converted ``state_dict``

    Fixes over the obfuscated original: the three parameters all shared the
    name ``_lowercase`` (a SyntaxError) and the ``__main__`` block called an
    undefined ``convert_tf_checkpoint_to_pytorch``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowerCAmelCase_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
170
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False): try: lowercase__ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : int = default else: # KEY is set, convert it to True or False. try: lowercase__ : Optional[int] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True) UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio UpperCamelCase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam UpperCamelCase = pytest.mark.skipif( not config.BEAM_AVAILABLE or 
config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility UpperCamelCase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows UpperCamelCase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase_ ( _lowerCamelCase : int): try: import faiss # noqa except ImportError: lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import regex # noqa except ImportError: lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import elasticsearch # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Union[str, Any]): try: import sqlalchemy # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.TORCH_AVAILABLE: lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not config.TF_AVAILABLE: lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Dict): if not config.JAX_AVAILABLE: lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.PIL_AVAILABLE: lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): try: import transformers # noqa F401 except ImportError: return 
unittest.skip("test requires transformers")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[Any]): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): def _require_spacy_model(_lowerCamelCase : Optional[int]): try: import spacy # noqa F401 spacy.load(_lowerCamelCase) except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) except OSError: return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase) else: return test_case return _require_spacy_model def lowercase_ ( _lowerCamelCase : Dict): try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : List[str]): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): if not _run_slow_tests or _run_slow_tests == 0: lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not _run_local_tests or _run_local_tests == 0: lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): if not _run_packaged_tests or _run_packaged_tests == 0: lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not _run_remote_tests or _run_remote_tests == 0: lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase) return test_case def 
lowercase_ ( *_lowerCamelCase : str): def decorate(cls : str): for name, fn in cls.__dict__.items(): if callable(_lowerCamelCase) and name.startswith("test"): for decorator in decorators: lowercase__ : Optional[int] = decorator(_lowerCamelCase) setattr(cls , _lowerCamelCase , _lowerCamelCase) return cls return decorate class snake_case_ ( __A ): pass class snake_case_ ( __A ): __A : List[Any] = 0 __A : str = 1 __A : int = 2 @contextmanager def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16): lowercase__ : int = requests.Session().request def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str): # Change the url to an invalid url so that the connection hangs lowercase__ : Any = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''') lowercase__ : Dict = timeout try: return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowercase__ : Dict = url lowercase__ : Union[str, Any] = e.args[0] lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),) lowercase__ : int = (max_retry_error,) raise def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple): raise requests.ConnectionError("Offline mode is enabled." 
, request=_lowerCamelCase) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple): lowercase__ : Dict = str(Path().resolve()) with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir: try: os.chdir(_lowerCamelCase) yield finally: os.chdir(_lowerCamelCase) @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]): return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() def lowercase_ ( _lowerCamelCase : str): import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict): try: return func(*_lowerCamelCase , **_lowerCamelCase) except HTTPError as err: if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"): pytest.xfail(str(_lowerCamelCase)) raise err return decorator.decorator(_wrapper , _lowerCamelCase) class snake_case_ : def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]: lowercase__ : Tuple = returncode lowercase__ : int = stdout lowercase__ : Union[str, Any] = stderr async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict): while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : str = [] lowercase__ : List[str] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")), _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True): lowercase__ : Any = asyncio.get_event_loop() lowercase__ : Tuple = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : int = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Any = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''') return result def lowercase_ ( ): lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0") lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
0
"""Flip-augment a YOLO-format object-detection dataset.

Reads every image/label pair from IMAGE_DIR / LABEL_DIR, flips each image
(and mirrors its bounding boxes accordingly) and writes the results to
OUTPUT_DIR with a randomized filename suffix.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2 as cva  # the original source used OpenCV under the alias ``cva``

# Configure these before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Load the dataset, flip every image + annotation, and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Random suffix keeps repeated runs from overwriting earlier output.
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple:
    """Return (image paths, annotation lists) for every label file in *label_dir*.

    Each annotation is ``[class_id, x_center, y_center, width, height]``
    (YOLO format, floats normalized to [0, 1]).  Label files with no boxes
    are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple:
    """Flip every image and mirror its YOLO boxes.

    flip_type: 1 = horizontal (mirror x), 0 = vertical (mirror y).
    Returns (flipped images, flipped annotation lists, original paths).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]  # mirror x around the image centre
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]  # mirror y around the image centre
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length *number_char* (> 1)."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
80
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase_ ( _lowerCamelCase : int): lowercase__ : int = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', )) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', )) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', )) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', )) return embed def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int): lowercase__ : Optional[Any] = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', )) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', )) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', )) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', )) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', )) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''')) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''')) return attention_weights def lowercase_ ( _lowerCamelCase : Optional[int]): 
lowercase__ : Tuple = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token")) return token def lowercase_ ( ): lowercase__ : List[str] = [] head.append(("layernorm.weight", "norm.weight")) head.append(("layernorm.bias", "norm.bias")) head.append(("classifier.weight", "head.weight")) head.append(("classifier.bias", "head.bias")) return head def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]): lowercase__ : Optional[Any] = "imagenet-1k-id2label.json" lowercase__ : List[str] = 1000 lowercase__ : Dict = "huggingface/label-files" lowercase__ : List[Any] = num_labels lowercase__ : Tuple = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Tuple = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Any = idalabel lowercase__ : List[Any] = {v: k for k, v in idalabel.items()} lowercase__ : Optional[int] = CvtConfig(num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1)[-1][4:6] == "13": lowercase__ : Any = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1)[-1][4:6] == "21": lowercase__ : Tuple = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Union[str, Any] = [2, 2, 20] lowercase__ : Optional[Any] = [3, 12, 16] lowercase__ : Optional[Any] = [192, 768, 1024] lowercase__ : Union[str, Any] = CvtForImageClassification(_lowerCamelCase) lowercase__ : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k") lowercase__ : int = image_size lowercase__ : Dict = torch.load(_lowerCamelCase , map_location=torch.device("cpu")) lowercase__ : Any = OrderedDict() lowercase__ : int = [] for idx in range(len(config.depth)): if config.cls_token[idx]: lowercase__ : Dict = list_of_state_dict + 
cls_token(_lowerCamelCase) lowercase__ : List[str] = list_of_state_dict + embeddings(_lowerCamelCase) for cnt in range(config.depth[idx]): lowercase__ : Any = list_of_state_dict + attention(_lowerCamelCase , _lowerCamelCase) lowercase__ : List[str] = list_of_state_dict + final() for gg in list_of_state_dict: print(_lowerCamelCase) for i in range(len(_lowerCamelCase)): lowercase__ : Dict = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_lowerCamelCase) model.save_pretrained(_lowerCamelCase) image_processor.save_pretrained(_lowerCamelCase) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
def a_ ( SCREAMING_SNAKE_CASE__ : dict ): '''simple docstring''' _lowerCamelCase : set[int] =set() # To detect a back edge, keep track of vertices currently in the recursion stack _lowerCamelCase : set[int] =set() return any( node not in visited and depth_first_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for node in graph ) def a_ ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : set , SCREAMING_SNAKE_CASE__ : set ): '''simple docstring''' visited.add(_lowerCamelCase ) rec_stk.add(_lowerCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(_lowerCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
199
# Lazy-import wiring for the ELECTRA model family.  Heavy framework-specific
# modules (PyTorch / TensorFlow / Flax) are only imported on first access.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base structure: configuration + slow tokenizer are always importable.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy structure with real imports so static type checkers see them.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy; submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from __future__ import annotations import csv import requests from bsa import BeautifulSoup def lowercase ( __snake_case : str = "" ): lowercase_ : str = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" lowercase_ : int = BeautifulSoup(requests.get(_lowerCamelCase ).text , '''html.parser''' ) lowercase_ : int = soup.find_all('''td''' , attrs='''titleColumn''' ) lowercase_ : Any = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowerCamelCase , _lowerCamelCase ) } def lowercase ( __snake_case : str = "IMDb_Top_250_Movies.csv" ): lowercase_ : Dict = get_imdb_top_aaa_movies() with open(_lowerCamelCase , '''w''' , newline='''''' ) as out_file: lowercase_ : Optional[int] = csv.writer(_lowerCamelCase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
33
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case_ ( __A ,unittest.TestCase ): __A : Union[str, Any] = LEDTokenizer __A : Union[str, Any] = LEDTokenizerFast __A : Optional[Any] = True def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().setUp() lowercase__ : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple: return "lower newer", "lower newer" @cached_property def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: return 
LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def __UpperCamelCase ( self : Tuple ) -> int: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def __UpperCamelCase ( self : int ) -> List[Any]: lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Tuple: lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowercase_ ) self.assertIn("attention_mask" , lowercase_ ) self.assertNotIn("labels" , lowercase_ ) self.assertNotIn("decoder_attention_mask" , lowercase_ ) @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Any: lowercase__ : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = tokenizer( ["I am a small frog" * 10_24, "I am a 
small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Any: lowercase__ : Union[str, Any] = ["A long paragraph for summarization."] lowercase__ : List[Any] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" ) lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" ) lowercase__ : Optional[int] = inputs["input_ids"] lowercase__ : str = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = ["Summary of the text.", "Another summary."] lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ ) lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]] lowercase__ : Any = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ ) def __UpperCamelCase ( self : int ) -> Union[str, Any]: pass def __UpperCamelCase ( self : int ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : 
List[Any] = "A, <mask> AllenNLP sentence." lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
87
0
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__: Tuple = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[Any] = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys A__: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
276
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
0
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase_ = """src/diffusers""" # Matches is_xxx_available() UpperCamelCase_ = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla UpperCamelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") UpperCamelCase_ = """ {0} = None """ UpperCamelCase_ = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ UpperCamelCase_ = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def _UpperCAmelCase ( _lowerCamelCase : List[str] ) -> Dict: _lowerCAmelCase : Union[str, Any] = _re_backend.findall(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None return "_and_".join(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Any: with open(os.path.join(_lowerCamelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase : Any = f.readlines() # Get to the point we do the actual imports for type checking _lowerCAmelCase : List[Any] = 0 _lowerCAmelCase : Tuple = {} # Go through the end of the file while line_index < len(_lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _lowerCAmelCase : Tuple = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 _lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while line_index < len(_lowerCamelCase ) and len(lines[line_index] ) > 1: _lowerCAmelCase : Any = lines[line_index] _lowerCAmelCase : Union[str, Any] = 
_re_single_line_import.search(_lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_lowerCamelCase ) > 0: _lowerCAmelCase : List[str] = objects else: line_index += 1 return backend_specific_objects def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : int ) -> Optional[int]: if name.isupper(): return DUMMY_CONSTANT.format(_lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(_lowerCamelCase , _lowerCamelCase ) else: return DUMMY_CLASS.format(_lowerCamelCase , _lowerCamelCase ) def _UpperCAmelCase ( _lowerCamelCase : List[Any]=None ) -> Union[str, Any]: if backend_specific_objects is None: _lowerCAmelCase : Union[str, Any] = read_init() # For special correspondence backend to module name as used in the function requires_modulename _lowerCAmelCase : Dict = {} for backend, objects in backend_specific_objects.items(): _lowerCAmelCase : Any = "[" + ", ".join(f'"{b}"' for b in backend.split("""_and_""" ) ) + "]" _lowerCAmelCase : Union[str, Any] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_lowerCamelCase , _lowerCamelCase ) for o in objects] ) _lowerCAmelCase : Tuple = dummy_file return dummy_files def _UpperCAmelCase ( _lowerCamelCase : int=False ) -> Optional[int]: _lowerCAmelCase : Optional[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _lowerCAmelCase : Tuple = {"torch": "pt"} # Locate actual dummy modules and read their content. 
_lowerCAmelCase : str = os.path.join(_lowerCamelCase , """utils""" ) _lowerCAmelCase : int = { backend: os.path.join(_lowerCamelCase , f'dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py' ) for backend in dummy_files.keys() } _lowerCAmelCase : Tuple = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_lowerCamelCase ): with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase : Optional[Any] = f.read() else: _lowerCAmelCase : str = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py as the main ' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f'diffusers.utils.dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py. Run `make fix-copies` ' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCamelCase_ = parser.parse_args() check_dummies(args.fix_and_overwrite)
309
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class snake_case_ ( unittest.TestCase ): @require_torch def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Union[str, Any] = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) lowercase__ : List[str] = load_dataset("ashraq/esc50" ) lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"] lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : str ) -> Optional[int]: pass @slow @require_torch def __UpperCamelCase ( self : List[str] ) -> int: lowercase__ : Tuple = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" ) lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"] lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ] , ) lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) lowercase__ : Tuple = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) 
self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: pass
87
0
def UpperCamelCase__( UpperCamelCase__ : dict )->Optional[int]: A__ = set() # edges = list of graph's edges A__ = get_edges(_lowerCamelCase ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: A__ = edges.pop() chosen_vertices.add(_lowerCamelCase ) chosen_vertices.add(_lowerCamelCase ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(_lowerCamelCase ) return chosen_vertices def UpperCamelCase__( UpperCamelCase__ : dict )->Any: A__ = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
193
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
0
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def A ( _lowercase , _lowercase , _lowercase ): if isinstance(_lowerCamelCase , torch.Tensor ): return image elif isinstance(_lowerCamelCase , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[str] = [image] if isinstance(image[0] , PIL.Image.Image ): SCREAMING_SNAKE_CASE : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] SCREAMING_SNAKE_CASE : Tuple = np.concatenate(_lowerCamelCase , axis=0 ) SCREAMING_SNAKE_CASE : Any = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE : List[Any] = image.transpose(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE : Tuple = 2.0 * image - 1.0 SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(_lowerCamelCase ) elif isinstance(image[0] , torch.Tensor ): SCREAMING_SNAKE_CASE : Any = torch.cat(_lowerCamelCase , dim=0 ) return image def A ( _lowercase , _lowercase , _lowercase , _lowercase=0.9995 ): if not isinstance(_lowerCamelCase , np.ndarray ): SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Tuple = va.device SCREAMING_SNAKE_CASE : Union[str, Any] = va.cpu().numpy() SCREAMING_SNAKE_CASE : str = va.cpu().numpy() SCREAMING_SNAKE_CASE : Tuple = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) ) if np.abs(_lowerCamelCase ) > DOT_THRESHOLD: SCREAMING_SNAKE_CASE : Any = (1 - t) * va + t * va else: SCREAMING_SNAKE_CASE : Any = 
np.arccos(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = np.sin(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = theta_a * t SCREAMING_SNAKE_CASE : Optional[Any] = np.sin(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = np.sin(theta_a - theta_t ) / sin_theta_a SCREAMING_SNAKE_CASE : Dict = sin_theta_t / sin_theta_a SCREAMING_SNAKE_CASE : List[Any] = sa * va + sa * va if inputs_are_torch: SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase ) return va def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Any = F.normalize(_lowerCamelCase , dim=-1 ) SCREAMING_SNAKE_CASE : Optional[Any] = F.normalize(_lowerCamelCase , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def A ( _lowercase , _lowercase ): for param in model.parameters(): SCREAMING_SNAKE_CASE : str = value class lowercase__ ( __A): def __init__( self : str , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , UpperCamelCase__ : CLIPFeatureExtractor , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=None , ): '''simple docstring''' super().__init__() self.register_modules( vae=lowercase_ , text_encoder=lowercase_ , clip_model=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , coca_model=lowercase_ , coca_tokenizer=lowercase_ , coca_transform=lowercase_ , ) SCREAMING_SNAKE_CASE : int = ( feature_extractor.size if isinstance(feature_extractor.size , lowercase_ ) else feature_extractor.size["shortest_edge"] ) SCREAMING_SNAKE_CASE : Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , lowercase_ ) set_requires_grad(self.clip_model , 
lowercase_ ) def __A ( self : Any , UpperCamelCase__ : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_ ) def __A ( self : Optional[int] ): '''simple docstring''' self.enable_attention_slicing(lowercase_ ) def __A ( self : Tuple ): '''simple docstring''' set_requires_grad(self.vae , lowercase_ ) def __A ( self : Tuple ): '''simple docstring''' set_requires_grad(self.vae , lowercase_ ) def __A ( self : Dict ): '''simple docstring''' set_requires_grad(self.unet , lowercase_ ) def __A ( self : List[Any] ): '''simple docstring''' set_requires_grad(self.unet , lowercase_ ) def __A ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = min(int(num_inference_steps * strength ) , lowercase_ ) SCREAMING_SNAKE_CASE : Optional[int] = max(num_inference_steps - init_timestep , 0 ) SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __A ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ): '''simple docstring''' if not isinstance(lowercase_ , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(lowercase_ )}""" ) SCREAMING_SNAKE_CASE : int = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ): SCREAMING_SNAKE_CASE : Tuple = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ ) ] SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase_ , dim=0 ) else: SCREAMING_SNAKE_CASE : List[str] = self.vae.encode(lowercase_ 
).latent_dist.sample(lowercase_ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1_8215 * init_latents SCREAMING_SNAKE_CASE : Optional[Any] = init_latents.repeat_interleave(lowercase_ , dim=0 ) SCREAMING_SNAKE_CASE : int = randn_tensor(init_latents.shape , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents SCREAMING_SNAKE_CASE : str = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = init_latents return latents def __A ( self : Dict , UpperCamelCase__ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.coca_transform(lowercase_ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): SCREAMING_SNAKE_CASE : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) SCREAMING_SNAKE_CASE : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def __A ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor.preprocess(lowercase_ ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() SCREAMING_SNAKE_CASE : int = self.clip_model.get_image_features(lowercase_ ) SCREAMING_SNAKE_CASE : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ ) SCREAMING_SNAKE_CASE : Tuple = image_embeddings_clip.repeat_interleave(lowercase_ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def __A ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int 
, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = latents.detach().requires_grad_() SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ ) # predict the noise residual SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.alphas_cumprod[timestep] SCREAMING_SNAKE_CASE : Tuple = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf SCREAMING_SNAKE_CASE : Union[str, Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 SCREAMING_SNAKE_CASE : Optional[Any] = torch.sqrt(lowercase_ ) SCREAMING_SNAKE_CASE : List[str] = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , lowercase_ ): SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[index] SCREAMING_SNAKE_CASE : Any = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE : Dict = 1 / 0.1_8215 * sample SCREAMING_SNAKE_CASE : Optional[Any] = self.vae.decode(lowercase_ ).sample SCREAMING_SNAKE_CASE : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[str] = transforms.Resize(self.feature_extractor_size )(lowercase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.normalize(lowercase_ ).to(latents.dtype ) SCREAMING_SNAKE_CASE : List[Any] = self.clip_model.get_image_features(lowercase_ ) SCREAMING_SNAKE_CASE : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ ) SCREAMING_SNAKE_CASE : int = spherical_dist_loss(lowercase_ , lowercase_ ).mean() * clip_guidance_scale 
SCREAMING_SNAKE_CASE : int = -torch.autograd.grad(lowercase_ , lowercase_ )[0] if isinstance(self.scheduler , lowercase_ ): SCREAMING_SNAKE_CASE : Optional[int] = latents.detach() + grads * (sigma**2) SCREAMING_SNAKE_CASE : Optional[int] = noise_pred_original else: SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred_original - torch.sqrt(lowercase_ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Tuple , UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = 512 , UpperCamelCase__ : Optional[int] = 512 , UpperCamelCase__ : float = 0.6 , UpperCamelCase__ : Optional[int] = 50 , UpperCamelCase__ : Optional[float] = 7.5 , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[float] = 100 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , UpperCamelCase__ : float = 0.8 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(lowercase_ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(lowercase_ , torch.Generator ) and batch_size > 1: SCREAMING_SNAKE_CASE : List[str] = [generator] + [None] * (batch_size - 1) SCREAMING_SNAKE_CASE : str = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] SCREAMING_SNAKE_CASE : Dict = [x[0] for x in coca_is_none if x[1]] SCREAMING_SNAKE_CASE : int = ", ".join(lowercase_ ) # generate prompts with coca model if prompt is 
None if content_prompt is None: if len(lowercase_ ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) SCREAMING_SNAKE_CASE : Dict = self.get_image_description(lowercase_ ) if style_prompt is None: if len(lowercase_ ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_description(lowercase_ ) # get prompt text embeddings for content and style SCREAMING_SNAKE_CASE : Tuple = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE : int = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] SCREAMING_SNAKE_CASE : List[str] = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE : Optional[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] SCREAMING_SNAKE_CASE : int = slerp(lowercase_ , lowercase_ , lowercase_ ) # duplicate text embeddings for each generation per prompt SCREAMING_SNAKE_CASE : Any = text_embeddings.repeat_interleave(lowercase_ , dim=0 ) # set timesteps SCREAMING_SNAKE_CASE : Optional[int] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) SCREAMING_SNAKE_CASE : Optional[Any] = {} if accepts_offset: SCREAMING_SNAKE_CASE : Optional[Any] = 1 self.scheduler.set_timesteps(lowercase_ , **lowercase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_timesteps(lowercase_ , lowercase_ , self.device ) 
SCREAMING_SNAKE_CASE : str = timesteps[:1].repeat(lowercase_ ) # Preprocess image SCREAMING_SNAKE_CASE : int = preprocess(lowercase_ , lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Tuple = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ ) SCREAMING_SNAKE_CASE : List[Any] = preprocess(lowercase_ , lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ ) SCREAMING_SNAKE_CASE : Any = slerp(lowercase_ , lowercase_ , lowercase_ ) if clip_guidance_scale > 0: SCREAMING_SNAKE_CASE : Tuple = self.get_clip_image_embeddings(lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_clip_image_embeddings(lowercase_ , lowercase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = slerp( lowercase_ , lowercase_ , lowercase_ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : int = content_text_input.input_ids.shape[-1] SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowercase_ , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt SCREAMING_SNAKE_CASE : Union[str, Any] = uncond_embeddings.repeat_interleave(lowercase_ , dim=0 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE : Tuple = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. SCREAMING_SNAKE_CASE : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) SCREAMING_SNAKE_CASE : Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps SCREAMING_SNAKE_CASE : str = torch.randn(lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to( self.device ) else: SCREAMING_SNAKE_CASE : Tuple = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) SCREAMING_SNAKE_CASE : List[Any] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE : List[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE : List[str] = {} if accepts_eta: SCREAMING_SNAKE_CASE : List[str] = eta # check if the scheduler accepts generator SCREAMING_SNAKE_CASE : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: SCREAMING_SNAKE_CASE : Optional[int] = generator with self.progress_bar(total=lowercase_ ): for i, t in enumerate(lowercase_ ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ ) # predict the noise residual SCREAMING_SNAKE_CASE : Tuple = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample # perform classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : int = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: SCREAMING_SNAKE_CASE : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) SCREAMING_SNAKE_CASE : Dict = self.cond_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE : Union[str, Any] = 1 / 0.1_8215 * latents SCREAMING_SNAKE_CASE : Optional[Any] = self.vae.decode(lowercase_ ).sample SCREAMING_SNAKE_CASE : List[Any] = (image / 2 + 0.5).clamp(0 
, 1 ) SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
182
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """
    Stop once the generated sequence reaches `max_length` tokens (prompt included).

    Args:
        max_length: total length (in tokens) at which generation stops.
        max_position_embeddings: the model's positional-embedding budget; exceeding it
            only triggers a warning, never a stop.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """
    Deprecated: stop once `start_length + max_new_tokens` tokens have been generated.
    Use `MaxLengthCriteria` instead.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """
    Stop once generation has been running longer than `max_time` seconds.

    Args:
        max_time: wall-clock budget in seconds.
        initial_timestamp: start of the countdown; defaults to construction time.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any one of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """Return the `max_length` of the first length-based criterion, or None."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """
    Reconcile a criteria list with an explicit `max_length`.

    Warns when the list already carries a different `max_length`; otherwise returns a
    deep copy of the list, extended with a `MaxLengthCriteria` when none was present.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
87
0
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    """
    Seq2Seq trainer specialized for question answering: metrics are computed on the
    post-processed predictions (via `post_process_function`) rather than on raw outputs.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """Run the evaluation loop, then post-process predictions and compute metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation; we do it after post-processing below.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default.
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        """Run prediction, post-process, and wrap results in a `PredictionOutput`."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation; we do it after post-processing below.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
9
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """
    Construct dense atom14 <-> atom37 index/exists maps for each residue in `protein`.

    Reads `protein["aatype"]` (long tensor of residue-type indices) and adds:
      - "residx_atom14_to_atom37": per-residue atom14 -> atom37 indices
      - "residx_atom37_to_atom14": per-residue atom37 -> atom14 indices
      - "atom14_atom_exists" / "atom37_atom_exists": existence masks
    Returns the (mutated) `protein` dict.
    """
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy front-end: run `make_atom14_masks` on tensors, return numpy arrays."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
87
0
"""simple docstring""" import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __UpperCamelCase : Dict = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __UpperCamelCase : Optional[Any] = parser.parse_args() __UpperCamelCase : Optional[int] = '''cpu''' __UpperCamelCase : Any = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' __UpperCamelCase : Optional[Any] = '''path-to-your-trained-model''' __UpperCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __UpperCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __UpperCamelCase : List[Any] = pipe.to(device) # to channels last __UpperCamelCase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last) __UpperCamelCase : str = pipe.vae.to(memory_format=torch.channels_last) __UpperCamelCase : int = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __UpperCamelCase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __UpperCamelCase : Union[str, Any] = torch.randn(2, 4, 6_4, 6_4) __UpperCamelCase : Optional[int] = torch.rand(1) * 9_9_9 __UpperCamelCase : Any = torch.randn(2, 7_7, 7_6_8) __UpperCamelCase : List[str] = (sample, timestep, encoder_hidden_status) try: __UpperCamelCase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __UpperCamelCase : Any = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __UpperCamelCase : List[str] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, 
inplace=True) __UpperCamelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __UpperCamelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __UpperCamelCase : Optional[Any] = 6_6_6 __UpperCamelCase : List[str] = torch.Generator(device).manual_seed(seed) __UpperCamelCase : Tuple = {'''generator''': generator} if args.steps is not None: __UpperCamelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __UpperCamelCase : List[str] = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
106
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds small BigBird configs and dummy inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return a small BigBirdConfig plus random input tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Block-sparse attention does not return attention probabilities, so the
        # common attention-output checks only run when `test_attn_probs` is set.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
87
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """
    Configuration for a Speech2Text2 decoder.

    Defaults reproduce the values visible in this module; the constructor stores the
    decoder hyper-parameters and forwards the special-token ids to `PretrainedConfig`.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
305
# Lazy-loading __init__ for the GroupViT model package: submodules are imported on
# first attribute access, and only when the relevant backend (torch/tf) is available.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar _lowercase : int =TypeVar("T") class snake_case__ (Generic[T] ): """simple docstring""" __lowerCAmelCase :deque[T] # Cache store of keys __lowerCAmelCase :set[T] # References of the keys in cache __lowerCAmelCase :int = 10 # Maximum capacity of cache def __init__( self , __lowercase ) -> None: """simple docstring""" a__ : int = deque() a__ : str = set() if not n: a__ : str = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: a__ : List[Any] = n def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> None: """simple docstring""" if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: a__ : Dict = self.dq_store.pop() self.key_reference.remove(lowercase_ ) else: self.dq_store.remove(lowercase_ ) self.dq_store.appendleft(lowercase_ ) self.key_reference.add(lowercase_ ) def SCREAMING_SNAKE_CASE__( self ) -> None: """simple docstring""" for k in self.dq_store: print(lowercase_ ) def __repr__( self ) -> str: """simple docstring""" return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Union[str, Any] =LRUCache(4) lru_cache.refer("A") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("A") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
170
"""Tests for reading/writing `datasets` Datasets from/to JSON (lines) files.

The obfuscated original gave every function duplicate `_lowerCamelCase`
parameters (a SyntaxError) and named every top-level test `lowercase_`
(later defs shadowed earlier ones, so pytest could not collect them).
Distinct, meaningful names are restored below.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Shared sanity checks for a Dataset loaded from the 4-row JSON fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # Loading in memory must allocate Arrow memory; memory-mapped loading must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Column order must follow the file, not a sorted order.
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Requested feature order wins over the file's column order.
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared sanity checks for a DatasetDict loaded from the JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole-buffer JSON document."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse a JSON-lines buffer, one document per line.

    The obfuscated original called ``json.loads`` on the buffer instead of
    each line, which could never have worked.
    """
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        # Compare decompressed bytes against the reference fixture.
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
87
0
"""Registry of hyperparameter-search backends usable by the Trainer.

The obfuscated original named all four backend classes ``lowercase_`` and
subclassed an undefined ``__A``, while the registry at the bottom referenced
``OptunaBackend``/``RayTuneBackend``/``SigOptBackend``/``WandbBackend`` —
guaranteed NameErrors. Proper names are restored below.
"""
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Abstract interface a hyperparameter-search backend must implement."""

    # Backend identifier (e.g. "optuna"); set by subclasses.
    name: str
    # pip spec when it differs from `name` (e.g. "'ray[tune]'"); optional.
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run the search for `n_trials` trials optimizing in `direction`."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default hyperparameter space for `trial`."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise a RuntimeError with install instructions if not installed."""
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        """Human-readable pip command installing this backend."""
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    """Return the name of the first installed backend.

    Raises:
        RuntimeError: when no backend is installed (message lists install
            commands for all known backends).
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
80
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and tokenizer into one processor.

    The image processor optionally runs OCR to extract words and bounding
    boxes; the tokenizer then encodes words/boxes (and optional word labels)
    together with the pixel values.

    The obfuscated original subclassed an undefined ``__A`` and declared all
    three ProcessorMixin class attributes under the same name, overwriting
    each other; the proper attribute names are restored here.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: accept the deprecated kwarg name.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally OCR) then the tokenizer.

        When the image processor runs OCR, `boxes`/`word_labels` must NOT be
        supplied (OCR produces them); returns a BatchEncoding that also
        carries `pixel_values`.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
0
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image Stable Diffusion pipeline with "seed resizing".

    A fixed-size (64x64) reference noise tensor is generated from the seed and
    spliced into the center of the target-resolution latents, so the same seed
    yields visually similar images across output resolutions.

    NOTE(review): the obfuscated original subclassed an undefined ``__A`` and
    gave every ``__call__`` parameter the same name (a SyntaxError); the names
    below are reconstructed from how the body uses them. A trailing
    ``latents_reference`` keyword is added because the user-supplied-latents
    branch reads it but no parameter provided it.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in slices to reduce peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (one-step) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        """Generate images; returns StableDiffusionPipelineOutput (or a tuple
        ``(images, nsfw_flags)`` when ``return_dict=False``)."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference is None:
                raise ValueError("`latents_reference` must be provided when `latents` is passed explicitly.")
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # Splice the (cropped) reference noise into the center of the latents.
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
199
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class snake_case_ ( __A ): __A : str = ["pixel_values"] def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None: super().__init__(**lowercase_ ) lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24} lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" ) lowercase__ : Dict = do_resize lowercase__ : List[Any] = size lowercase__ : int = resample lowercase__ : Union[str, Any] = do_center_crop lowercase__ : Optional[int] = crop_size lowercase__ : List[str] = do_rescale lowercase__ : int = rescale_factor lowercase__ : List[Any] = do_normalize lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__ : str = image_std if image_std is not 
None else OPENAI_CLIP_STD lowercase__ : Dict = do_convert_rgb def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray: lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray: lowercase__ : Optional[Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image: lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Dict = size if size is not None else self.size lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ ) lowercase__ : Dict = resample if resample is not None else self.resample lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ ) 
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : int = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. 
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowercase__ : List[str] = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
87
0
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A : str = '''▁''' __A : str = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( __A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[int] = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : Any = BigBirdTokenizerFast SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Optional[Any] = True def A ( self : Union[str, Any] ) -> Optional[Any]: super().setUp() lowercase_ : Union[str, Any] = self.tokenizer_class(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Tuple ) -> List[Any]: lowercase_ : List[Any] = "<s>" lowercase_ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def A ( self : Optional[Any] ) -> int: lowercase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(lowercase_ ) , 10_04 ) def A ( self : List[Any] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def A ( self : Optional[int] ) -> Tuple: if not self.test_rust_tokenizer: return lowercase_ : Dict = self.get_tokenizer() lowercase_ : Any = self.get_rust_tokenizer() lowercase_ : str = "I was born in 92000, and this is falsé." 
lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ ) lowercase_ : Any = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) lowercase_ : List[str] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) lowercase_ : str = self.get_rust_tokenizer() lowercase_ : Union[str, Any] = tokenizer.encode(lowercase_ ) lowercase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def A ( self : Dict ) -> str: lowercase_ : Optional[Any] = BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ ) lowercase_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_85, 46, 10, 1_70, 3_82] , ) lowercase_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase_ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', 
SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : List[Any] ) -> Optional[int]: return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def A ( self : Optional[int] ) -> int: lowercase_ : Optional[Any] = "Hello World!" lowercase_ : Optional[int] = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def A ( self : Tuple ) -> Optional[Any]: lowercase_ : Tuple = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off lowercase_ : Optional[Any] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def A ( self : List[Any] ) -> int: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase_ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase_ : str = " ".join(lowercase_ ) lowercase_ : List[str] = self.big_tokenizer.encode_plus(lowercase_ , return_tensors='''pt''' , return_token_type_ids=lowercase_ ) lowercase_ : int = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowercase_ ) lowercase_ : Optional[int] = BigBirdConfig(attention_type='''original_full''' ) lowercase_ : List[str] = BigBirdModel(lowercase_ ) assert 
model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def A ( self : str ) -> Optional[int]: lowercase_ : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase_ : str = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def A ( self : Optional[int] ) -> Optional[Any]: # fmt: off lowercase_ : str = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": 
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) A__: int = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> Dict: _a : Any =git.Repo(search_parent_directories=_lowerCamelCase ) _a : Dict ={ "repo_id": str(_lowerCamelCase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(_lowerCamelCase ,"""git_log.json""" ) ,"""w""" ) as f: json.dump(_lowerCamelCase ,_lowerCamelCase ,indent=4 ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> List[str]: if params.n_gpu <= 0: _a : str =0 _a : int =-1 _a : Tuple =True _a : List[Any] =False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 _a : str =int(os.environ["""WORLD_SIZE"""] ) _a : Any =int(os.environ["""N_GPU_NODE"""] ) _a : int =int(os.environ["""RANK"""] ) # number of nodes / node ID _a : Union[str, Any] =params.world_size // params.n_gpu_per_node _a : str =params.global_rank // params.n_gpu_per_node _a : Tuple =True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 _a : Tuple =1 _a : Any =0 _a : Optional[Any] =0 _a : str =0 _a : Union[str, Any] =1 _a : int =1 _a : int =False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode _a : Tuple =params.node_id == 0 and params.local_rank == 0 _a : Dict =params.n_nodes > 1 # summary _a : 
Optional[Any] =F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" ,backend="""nccl""" ,) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ) -> List[str]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
276
UpperCamelCase = [0, 2, 4, 6, 8] UpperCamelCase = [1, 3, 5, 7, 9] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase__ : str = 0 for digit in range(10): lowercase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase) return result lowercase__ : Dict = 0 for digita in range(10): lowercase__ : int = digita if (remainder + digita) % 2 == 0: lowercase__ : Optional[Any] = ODD_DIGITS else: lowercase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowercase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def lowercase_ ( _lowerCamelCase : int = 9): lowercase__ : Tuple = 0 for length in range(1 , max_power + 1): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase) return result if __name__ == "__main__": print(f"{solution() = }")
87
0
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split UpperCamelCase_ = datasets.load_iris() UpperCamelCase_ = np.array(data["""data"""]) UpperCamelCase_ = np.array(data["""target"""]) UpperCamelCase_ = data["""target_names"""] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = train_test_split(X, y) def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[Any] ) -> Tuple: return np.linalg.norm(np.array(_lowerCamelCase ) - np.array(_lowerCamelCase ) ) def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Any=5 ) -> Optional[Any]: _lowerCAmelCase : int = zip(_lowerCamelCase , _lowerCamelCase ) # List of distances of all points from the point to be classified _lowerCAmelCase : str = [] for data_point in data: _lowerCAmelCase : str = euclidean_distance(data_point[0] , _lowerCamelCase ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. _lowerCAmelCase : Dict = [i[1] for i in sorted(_lowerCamelCase )[:k]] # Most commonly occurring class among them # is the class into which the point is classified _lowerCAmelCase : Tuple = Counter(_lowerCamelCase ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
309
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCamelCase = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' UpperCamelCase = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' UpperCamelCase = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any: lowercase__ : Optional[int] = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )] lowercase__ : str = TER( normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , ) lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
0
def UpperCamelCase__( UpperCamelCase__ : int , UpperCamelCase__ : int )->Optional[int]: while second != 0: A__ = first & second first ^= second A__ = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() a__: List[Any] = int(input('Enter the first number: ').strip()) a__: List[str] = int(input('Enter the second number: ').strip()) print(F"{add(first, second) = }")
193
def lowercase_ ( _lowerCamelCase : int): lowercase__ : Dict = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
87
0
from __future__ import annotations

from math import gcd


def A(num: int, seed: int = 2, step: int = 1, attempts: int = 3):
    """Find a nontrivial divisor of ``num`` with Pollard's rho, or None.

    Args:
        num: value to factor; must be >= 2.
        seed: initial position of both the tortoise and the hare.
        step: additive constant ``C`` of the pseudorandom map ``x**2 + C (mod num)``.
        attempts: how many (seed, step) restarts to try before giving up.

    Returns:
        A nontrivial factor of ``num``, or ``None`` if none was found within
        ``attempts`` tries (``num`` may then be prime — or we were unlucky).

    Raises:
        ValueError: if ``num`` < 2 (would make the algorithm loop forever).

    The original declared all four parameters with the same name (a
    SyntaxError) and never defined the ``pollard_rho`` name used by the CLI;
    both are fixed here with the algorithm itself left untouched.
    """
    if num < 2:
        raise ValueError('The input value cannot be less than 2')

    # The rho cycle-finding struggles with factors divisible by two, so even
    # inputs are answered directly.
    if num > 2 and num % 2 == 0:
        return 2

    def rand_fn(value: int, step: int, modulus: int) -> int:
        # Pseudorandom map; only needs to spread values uniformly mod ``modulus``.
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Floyd cycle detection: the hare moves twice per tortoise step.
        tortoise = seed
        hare = seed
        while True:
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
            # Once both are inside a cycle of length p | num, their difference
            # shares a divisor with num.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                continue  # no common divisor yet, keep searching
            if divisor == num:
                break  # trivial divisor — restart with new seed/step
            return divisor
        # Deterministic restart: reuse the hare's position, bump the constant.
        seed = hare
        step += 1
    return None


# Name used by the CLI below (Brent/Pollard rho entry point).
pollard_rho = A

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"""{args.num} is probably prime""")
    else:
        quotient = args.num // divisor
        print(f"""{args.num} = {divisor} * {quotient}""")
182
from PIL import Image


def lowercase_(img: Image, level: int):
    """Return a copy of ``img`` with its contrast changed by ``level``.

    Uses the standard contrast-correction factor
    ``F = 259 * (level + 255) / (255 * (259 - level))`` and maps every channel
    value toward/away from the midpoint 128.  ``level`` in (-255, 259);
    positive increases contrast, negative decreases it.

    The original declared both parameters with the same name (a SyntaxError)
    and the demo called an undefined ``change_contrast`` / saved an undefined
    ``cont_img``; both are fixed here without changing the math.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Linear remap around the 0-255 midpoint; Image.point applies it
        # to every pixel channel.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


# Name used by the demo below.
change_contrast = lowercase_

if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
87
0
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse('3.8'):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Empty delimiter: sentences are concatenated directly before the
# character-level edit distance is computed.
SENTENCE_DELIMITER = ''


if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Flatten a list of sentences into one list of characters.

        Compatibility shim for jiwer < 2.3.0, which lacks the
        ReduceToListOfListOfChars transform used in the modern branch below.
        """

        def __init__(self, sentence_delimiter: str = ' '):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences only (not after the last).
                if self.sentence_delimiter is not None and self.sentence_delimiter != '' and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = '\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

_DESCRIPTION = '\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'

_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character Error Rate metric backed by jiwer.

    The character-level transforms above make jiwer's word-level machinery
    operate on characters, so the measure labelled "wer" in its output is in
    fact the CER.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
                'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # One global alignment over all texts; slightly more accurate.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )['wer']

        # Otherwise accumulate edit operations pair by pair.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures['substitutions'] + measures['deletions'] + measures['insertions']
            total += measures['substitutions'] + measures['deletions'] + measures['hits']

        return incorrect / total
9
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar('T')


class snake_case_(Generic[T]):
    """Key-only LRU cache: a deque ordered by recency plus a set for O(1)
    membership tests.

    The original obfuscated version assigned locals instead of instance
    attributes in ``__init__`` and gave both methods the same name (the
    second silently shadowed the first), so it could not run; this restores
    the intended structure without changing the eviction logic.
    """

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys.

        ``n`` falsy (0/None) means effectively unbounded; negative raises.
        """
        self.dq_store: deque[T] = deque()   # most recently used at the left
        self.key_reference: set[T] = set()  # O(1) membership for dq_store
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of key ``x``, evicting the LRU key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict the least recently used key (right end of the deque).
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front instead of duplicating.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'


# Name used by the demo below.
LRUCache = snake_case_

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lru_cache = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
0
"""Tests for the text-classification tool, both local and remote."""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class SCREAMING_SNAKE_CASE(unittest.TestCase, ToolTesterMixin):
    """Exercises the text-classification tool with positional and keyword
    inputs, against the local pipeline and the remote endpoint.

    The obfuscated original gave all four test methods the same name (so
    only the last survived) and stored the tools in locals instead of on
    ``self``; this restores distinct names and instance attributes.
    """

    def setUp(self):
        # Local tool plus its remote counterpart, both under test.
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
106
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
    'YituTech/conv-bert-medium-small': (
        'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
    ),
    'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for ConvBERT models.

    The obfuscated original declared every ``__init__`` parameter with the
    same name (a SyntaxError) and used one class name for both classes in
    this module; distinct, descriptive names are restored here with the
    default values unchanged.
    """

    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )


# Preserve the module-level name the obfuscated original ended up exposing
# (its second `snake_case_` class shadowed the first).
snake_case_ = ConvBertOnnxConfig
87
0
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce a single image, a video, or a batch of videos into a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video
    elif is_valid_image(videos):
        return [[videos]]  # a single frame
    raise ValueError(f'Could not make batched video from {videos}')


class A(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (optionally with a
    signed offset) and normalize batches of videos.

    The obfuscated original gave every method the same name, so internal
    calls such as ``self.resize`` were unresolved; this restores distinct
    method names with the per-step logic unchanged.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to ``size``: either a shortest-edge spec or explicit height/width."""
        size = get_size_dict(size, default_to_square=False)
        if 'shortest_edge' in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif 'height' in size and 'width' in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to an explicit height/width."""
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``, optionally shifting first.

        NOTE(review): the subtraction of ``scale / 2`` before rescaling is
        reproduced from the original source — confirm against the intended
        signed output range.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Run the full per-frame pipeline; all flags must be resolved by the caller."""
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into a ``BatchFeature`` of pixel values."""
        # Resolve every flag against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
305
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def lowercase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.

    The original declared all three parameters with the same name (a
    SyntaxError) and the CLI called an undefined
    ``convert_tf_checkpoint_to_pytorch``; both are fixed here.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


# Name used by the CLI below.
convert_tf_checkpoint_to_pytorch = lowercase_

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
"""Initialize and save a fresh codeparrot model from a GPT-2 style config.

The obfuscated original assigned every result to the same throwaway name
while later statements read the intended names (``parser``, ``args``,
``tokenizer``, ``config_kwargs``, ``config``, ``model``), so it could not
run; the intended names are restored below with the logic unchanged.
"""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
170
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False): try: lowercase__ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : int = default else: # KEY is set, convert it to True or False. try: lowercase__ : Optional[int] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True) UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio UpperCamelCase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam UpperCamelCase = pytest.mark.skipif( not config.BEAM_AVAILABLE or 
config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility UpperCamelCase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows UpperCamelCase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase_ ( _lowerCamelCase : int): try: import faiss # noqa except ImportError: lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import regex # noqa except ImportError: lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import elasticsearch # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Union[str, Any]): try: import sqlalchemy # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.TORCH_AVAILABLE: lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not config.TF_AVAILABLE: lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Dict): if not config.JAX_AVAILABLE: lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.PIL_AVAILABLE: lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): try: import transformers # noqa F401 except ImportError: return 
unittest.skip("test requires transformers")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[Any]): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): def _require_spacy_model(_lowerCamelCase : Optional[int]): try: import spacy # noqa F401 spacy.load(_lowerCamelCase) except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) except OSError: return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase) else: return test_case return _require_spacy_model def lowercase_ ( _lowerCamelCase : Dict): try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : List[str]): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): if not _run_slow_tests or _run_slow_tests == 0: lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not _run_local_tests or _run_local_tests == 0: lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): if not _run_packaged_tests or _run_packaged_tests == 0: lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not _run_remote_tests or _run_remote_tests == 0: lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase) return test_case def 
lowercase_ ( *_lowerCamelCase : str): def decorate(cls : str): for name, fn in cls.__dict__.items(): if callable(_lowerCamelCase) and name.startswith("test"): for decorator in decorators: lowercase__ : Optional[int] = decorator(_lowerCamelCase) setattr(cls , _lowerCamelCase , _lowerCamelCase) return cls return decorate class snake_case_ ( __A ): pass class snake_case_ ( __A ): __A : List[Any] = 0 __A : str = 1 __A : int = 2 @contextmanager def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16): lowercase__ : int = requests.Session().request def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str): # Change the url to an invalid url so that the connection hangs lowercase__ : Any = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''') lowercase__ : Dict = timeout try: return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowercase__ : Dict = url lowercase__ : Union[str, Any] = e.args[0] lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),) lowercase__ : int = (max_retry_error,) raise def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple): raise requests.ConnectionError("Offline mode is enabled." 
, request=_lowerCamelCase) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple): lowercase__ : Dict = str(Path().resolve()) with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir: try: os.chdir(_lowerCamelCase) yield finally: os.chdir(_lowerCamelCase) @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def is_rng_equal(rng_a, rng_b):
    """Return True if two NumPy bit generators would produce the same stream.

    Each generator is deep-copied before sampling so the caller's RNG state is
    not advanced; equality is probed by drawing 10 integers in [0, 100) from
    each copy and comparing the resulting lists.
    """
    # deepcopy: never mutate the caller's generators.
    return deepcopy(rng_a).integers(0, 100, 10).tolist() == deepcopy(rng_b).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    """Decorator: turn HTTP 500/502 errors raised by *func* into pytest xfails.

    Server-side flakiness (500/502) should not fail the suite; any other
    HTTPError is re-raised unchanged.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Inspect the *error*, not the function, for the status prefix.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of an async subprocess run: return code plus captured streams."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout  # list of decoded stdout lines
        self.stderr = stderr  # list of decoded stderr lines


async def _read_stream(stream, callback):
    """Feed each line of an asyncio stream to *callback* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd* asynchronously, teeing stdout/stderr live while capturing them.

    Returns a _RunOutput with the process return code and the captured lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Capture the decoded line and (unless quiet) echo it live.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Run *cmd* to completion via _stream_subprocess and validate the result.

    Raises RuntimeError on a non-zero return code (including the workers'
    stderr) or when the process produced no output at all.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return this pytest-xdist worker's numeric id (0 when not under xdist).

    PYTEST_XDIST_WORKER looks like "gw0", "gw1", ...; the "gw" prefix is
    stripped and the remainder parsed as an int.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique per xdist worker (29500 + id)."""
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
87
0
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DiTPipeline with a tiny Transformer2DModel."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original attribute name was mangled; `False` here disables
    # one of the mixin's optional checks (likely cpu-offload) — confirm against
    # the PipelineTesterMixin flags used by this diffusers version.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build the minimal transformer/vae/scheduler set for a fast test."""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released DiT-XL checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
80
import argparse
import json
from collections import OrderedDict


def embeddings(idx):
    """Return (hf_key, original_key) rename pairs for stage *idx*'s patch embedding."""
    prefix_hf = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    prefix_or = f"stage{idx}.patch_embed"
    return [
        (f"{prefix_hf}.projection.weight", f"{prefix_or}.proj.weight"),
        (f"{prefix_hf}.projection.bias", f"{prefix_or}.proj.bias"),
        (f"{prefix_hf}.normalization.weight", f"{prefix_or}.norm.weight"),
        (f"{prefix_hf}.normalization.bias", f"{prefix_or}.norm.bias"),
    ]


def attention(idx, cnt):
    """Return rename pairs for attention block *cnt* of stage *idx*.

    Pairs are generated in the same order as the original hand-written list:
    conv projections (q, k, v) with their batch-norm stats, the q/k/v linear
    projections, the attention output dense, the two MLP layers, and the two
    layer norms — 34 pairs in total.
    """
    attention_weights = []
    prefix_hf = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    prefix_or = f"stage{idx}.blocks.{cnt}"

    # Depthwise conv projection + its BatchNorm buffers, for each of q/k/v.
    bn_fields = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{prefix_hf}.attention.attention.convolution_projection_{proj}.convolution_projection"
        conv_or = f"{prefix_or}.attn.conv_proj_{short}"
        attention_weights.append((f"{conv_hf}.convolution.weight", f"{conv_or}.conv.weight"))
        for field in bn_fields:
            attention_weights.append((f"{conv_hf}.normalization.{field}", f"{conv_or}.bn.{field}"))

    # Linear q/k/v projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{prefix_hf}.attention.attention.projection_{proj}.{param}", f"{prefix_or}.attn.proj_{short}.{param}")
            )

    # Attention output, MLP, and layer norms.
    for hf_mod, or_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        attention_weights.append((f"{prefix_hf}.{hf_mod}.weight", f"{prefix_or}.{or_mod}.weight"))
        attention_weights.append((f"{prefix_hf}.{hf_mod}.bias", f"{prefix_or}.{or_mod}.bias"))

    return attention_weights


def cls_token(idx):
    """Return the rename pair for the cls token of stage *idx*.

    NOTE(review): the original key is hard-coded to "stage2.cls_token" — in CvT
    only the last stage carries a cls token, so *idx* only affects the HF key.
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Return rename pairs for the final norm and classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint to HuggingFace format and save it.

    Heavy dependencies are imported lazily so the pure-string rename helpers
    above stay importable without torch/transformers installed.
    """
    import torch
    from huggingface_hub import cached_download, hf_hub_url
    from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    # JSON keys are strings; the config expects integer ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the original assignment target was lost in this copy;
    # setting the processor's shortest edge to the requested size matches the
    # upstream conversion script — confirm against it.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        # hf_key <- original checkpoint value.
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
from typing import Any class A : def __init__( self : Any , lowercase_ : Any ) -> List[str]: """simple docstring""" _lowerCamelCase : str =data _lowerCamelCase : List[str] =None class A : def __init__( self : Tuple ) -> List[str]: """simple docstring""" _lowerCamelCase : List[str] =None def lowerCamelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" _lowerCamelCase : Any =self.head while temp is not None: print(temp.data , end=' ' ) _lowerCamelCase : Tuple =temp.next print() def lowerCamelCase ( self : List[Any] , lowercase_ : Any ) -> Tuple: """simple docstring""" _lowerCamelCase : Dict =Node(lowercase_ ) _lowerCamelCase : Optional[int] =self.head _lowerCamelCase : Union[str, Any] =new_node def lowerCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : str ) -> Optional[int]: """simple docstring""" if node_data_a == node_data_a: return else: _lowerCamelCase : Optional[Any] =self.head while node_a is not None and node_a.data != node_data_a: _lowerCamelCase : str =node_a.next _lowerCamelCase : Dict =self.head while node_a is not None and node_a.data != node_data_a: _lowerCamelCase : Any =node_a.next if node_a is None or node_a is None: return _lowerCamelCase : Optional[int] =node_a.data, node_a.data if __name__ == "__main__": lowerCamelCase = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
199
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: each optional-backend list is ADDED to this dict
# (the original copy re-bound the same name each time, so only the last
# assignment survived and the lazy module lost every other entry).
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( __A ): def A ( self : Optional[Any] ) -> List[str]: lowercase_ : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase_ , '''width_multiplier''' ) ) class _UpperCAmelCase : def __init__( self : Optional[int] , A : Union[str, Any] , A : List[str]=13 , A : List[str]=64 , A : Any=2 , A : Dict=3 , A : Tuple="swish" , A : List[Any]=3 , A : List[Any]=32 , A : str=0.1 , A : Any=0.02 , A : Optional[int]=True , A : List[str]=True , A : Optional[Any]=10 , A : List[str]=None , A : int=0.25 , A : List[str]=0.0 , A : str=0.0 , ) -> Dict: lowercase_ : Optional[int] = parent lowercase_ : Optional[Any] = batch_size lowercase_ : str = image_size lowercase_ : Union[str, Any] = patch_size lowercase_ : Tuple = num_channels lowercase_ : Optional[int] = make_divisible(5_12 * width_multiplier , divisor=8 ) lowercase_ : Optional[Any] = hidden_act lowercase_ : Union[str, Any] = conv_kernel_size lowercase_ : Tuple = output_stride lowercase_ : Dict = classifier_dropout_prob lowercase_ : Optional[int] = use_labels lowercase_ : Dict = is_training lowercase_ : Optional[int] = num_labels 
lowercase_ : Tuple = initializer_range lowercase_ : List[Any] = scope lowercase_ : List[Any] = width_multiplier lowercase_ : Optional[Any] = ffn_dropout lowercase_ : List[Any] = attn_dropout def A ( self : str ) -> str: lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Union[str, Any] = None lowercase_ : List[Any] = None if self.use_labels: lowercase_ : int = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase_ : Dict = self.get_config() return config, pixel_values, labels, pixel_labels def A ( self : Any ) -> List[str]: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def A ( self : List[Any] , A : str , A : List[Any] , A : List[Any] , A : Dict ) -> Union[str, Any]: lowercase_ : Any = MobileViTVaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[str] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A ( self : List[str] , A : Optional[int] , A : Tuple , A : Union[str, Any] , A : Optional[Any] ) -> Tuple: lowercase_ : Optional[Any] = self.num_labels lowercase_ : List[Any] = MobileViTVaForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : 
Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : Optional[int] , A : Tuple ) -> Union[str, Any]: lowercase_ : Any = self.num_labels lowercase_ : Dict = MobileViTVaForSemanticSegmentation(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowercase_ : Tuple = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A ( self : Tuple ) -> Optional[Any]: lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : Dict = config_and_inputs lowercase_ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( __A , __A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[int] = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Tuple = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : List[Any] = False def A ( self : str ) -> List[Any]: lowercase_ : int = MobileViTVaModelTester(self ) lowercase_ : Optional[int] = MobileViTVaConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def A ( self : Any ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def A ( self : str ) -> List[Any]: pass @unittest.skip(reason='''MobileViTV2 does not support input and output 
embeddings''' ) def A ( self : Any ) -> Optional[int]: pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def A ( self : str ) -> Optional[int]: pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def A ( self : Any ) -> int: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A ( self : str ) -> Optional[int]: pass def A ( self : Optional[int] ) -> Union[str, Any]: lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Union[str, Any] = model_class(lowercase_ ) lowercase_ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : List[str] = [*signature.parameters.keys()] lowercase_ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def A ( self : Union[str, Any] ) -> Any: lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def A ( self : Union[str, Any] ) -> Dict: def check_hidden_states_output(A : int , A : Dict , A : Dict ): lowercase_ : Any = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Optional[int] = outputs.hidden_states lowercase_ : Any = 5 self.assertEqual(len(lowercase_ ) , lowercase_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
lowercase_ : Optional[int] = 2 for i in range(len(lowercase_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def A ( self : List[Any] ) -> str: lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def A ( self : Any ) -> int: lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ ) @slow def A ( self : Union[str, Any] ) -> List[str]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Tuple = MobileViTVaModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowercase ( ): lowercase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def A ( self : List[Any] ) -> Optional[Any]: return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def A ( self : Union[str, Any] ) -> List[str]: lowercase_ : List[str] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( lowercase_ ) lowercase_ : int = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : Optional[int] = image_processor(images=lowercase_ , 
return_tensors='''pt''' ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) ) @slow def A ( self : Tuple ) -> Optional[int]: lowercase_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase_ : int = model.to(lowercase_ ) lowercase_ : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase_ : List[str] = prepare_img() lowercase_ : int = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) lowercase_ : Tuple = outputs.logits # verify the logits lowercase_ : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , lowercase_ ) lowercase_ : Optional[Any] = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=lowercase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) ) @slow def A ( self : Any ) -> List[str]: lowercase_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase_ : int = model.to(lowercase_ ) lowercase_ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase_ : List[str] = prepare_img() lowercase_ : Tuple = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ ) # forward pass with 
torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) lowercase_ : Tuple = outputs.logits.detach().cpu() lowercase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(50, 60)] ) lowercase_ : List[str] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , lowercase_ ) lowercase_ : int = image_processor.post_process_semantic_segmentation(outputs=lowercase_ ) lowercase_ : Dict = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , lowercase_ )
33
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case_ ( __A ,unittest.TestCase ): __A : Union[str, Any] = LEDTokenizer __A : Union[str, Any] = LEDTokenizerFast __A : Optional[Any] = True def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().setUp() lowercase__ : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple: return "lower newer", "lower newer" @cached_property def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: return 
LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def __UpperCamelCase ( self : Tuple ) -> int: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def __UpperCamelCase ( self : int ) -> List[Any]: lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Tuple: lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowercase_ ) self.assertIn("attention_mask" , lowercase_ ) self.assertNotIn("labels" , lowercase_ ) self.assertNotIn("decoder_attention_mask" , lowercase_ ) @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Any: lowercase__ : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = tokenizer( ["I am a small frog" * 10_24, "I am a 
small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Any: lowercase__ : Union[str, Any] = ["A long paragraph for summarization."] lowercase__ : List[Any] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" ) lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" ) lowercase__ : Optional[int] = inputs["input_ids"] lowercase__ : str = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = ["Summary of the text.", "Another summary."] lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ ) lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]] lowercase__ : Any = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ ) def __UpperCamelCase ( self : int ) -> Union[str, Any]: pass def __UpperCamelCase ( self : int ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : 
List[Any] = "A, <mask> AllenNLP sentence." lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
87
0
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device A__: str = False class A__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Optional[int] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple =torch.manual_seed(0 ) _a : List[Any] =pipe.dual_guided( prompt="""first prompt""" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) _a : List[str] =VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Any =generator.manual_seed(0 ) _a : Optional[Any] =pipe.dual_guided( prompt="""first prompt""" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' _a : str =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) 
pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Tuple ="cyberpunk 2077" _a : str =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Any =torch.manual_seed(0 ) _a : Any =pipe.dual_guided( prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" , ).images _a : Optional[int] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : Optional[int] =np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _a : str ="A painting of a squirrel eating a burger " _a : Any =torch.manual_seed(0 ) _a : List[str] =pipe.text_to_image( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" ).images _a : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : Optional[int] =np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _a : Union[str, Any] =pipe.image_variation(lowercase_ , generator=lowercase_ , output_type="""numpy""" ).images _a : Optional[Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : int =np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
276
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a_ (__A , unittest.TestCase ): __lowerCAmelCase : Optional[int] = LDMTextToImagePipeline __lowerCAmelCase : int = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } __lowerCAmelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } __lowerCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __lowerCAmelCase : str = False def __UpperCamelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) _lowerCAmelCase : List[str] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0 ) _lowerCAmelCase : str = AutoencoderKL( block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , 
num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _lowerCAmelCase : Optional[int] = CLIPTextModel(lowercase_ ) _lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase : List[Any] = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ): if str(lowercase_ ).startswith("""mps""" ): _lowerCAmelCase : int = torch.manual_seed(lowercase_ ) else: _lowerCAmelCase : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) _lowerCAmelCase : List[str] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase ( self ): _lowerCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : str = self.get_dummy_components() _lowerCAmelCase : Any = LDMTextToImagePipeline(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _lowerCAmelCase : str = self.get_dummy_inputs(lowercase_ ) _lowerCAmelCase : Union[str, Any] = pipe(**lowercase_ ).images _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_6, 1_6, 3) _lowerCAmelCase : int = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class a_ (unittest.TestCase ): def __UpperCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self , snake_case_ , snake_case_=torch.floataa , snake_case_=0 ): _lowerCAmelCase : Dict = torch.manual_seed(lowercase_ ) _lowerCAmelCase : Union[str, Any] = np.random.RandomState(lowercase_ ).standard_normal((1, 4, 3_2, 3_2) ) _lowerCAmelCase : Optional[Any] = 
torch.from_numpy(lowercase_ ).to(device=lowercase_ , dtype=lowercase_ ) _lowerCAmelCase : Optional[int] = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase ( self ): _lowerCAmelCase : str = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _lowerCAmelCase : Tuple = self.get_inputs(lowercase_ ) _lowerCAmelCase : Optional[Any] = pipe(**lowercase_ ).images _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_5_6, 2_5_6, 3) _lowerCAmelCase : List[Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _lowerCAmelCase : Optional[int] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class a_ (unittest.TestCase ): def __UpperCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self , snake_case_ , snake_case_=torch.floataa , snake_case_=0 ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(lowercase_ ) _lowerCAmelCase : str = np.random.RandomState(lowercase_ ).standard_normal((1, 4, 3_2, 3_2) ) _lowerCAmelCase : Dict = torch.from_numpy(lowercase_ ).to(device=lowercase_ , dtype=lowercase_ ) _lowerCAmelCase : Tuple = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 5_0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase ( self ): _lowerCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _lowerCAmelCase : Optional[Any] = self.get_inputs(lowercase_ ) _lowerCAmelCase : Optional[int] = pipe(**lowercase_ ).images[0] 
_lowerCAmelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) _lowerCAmelCase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
309
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class snake_case_ ( unittest.TestCase ): @require_torch def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Union[str, Any] = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) lowercase__ : List[str] = load_dataset("ashraq/esc50" ) lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"] lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : str ) -> Optional[int]: pass @slow @require_torch def __UpperCamelCase ( self : List[str] ) -> int: lowercase__ : Tuple = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" ) lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"] lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ] , ) lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) lowercase__ : Tuple = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) 
self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: pass
87
0
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=False )->str: try: A__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. A__ = default else: # KEY is set, convert it to True or False. try: A__ = strtobool(_lowerCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value a__: Any = parse_flag_from_env('RUN_SLOW', default=False) a__: List[Any] = parse_flag_from_env('RUN_REMOTE', default=False) a__: Optional[int] = parse_flag_from_env('RUN_LOCAL', default=True) a__: str = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression a__: Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') a__: Union[str, Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') a__: Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio a__: str = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam a__: str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # 
Dill-cloudpickle compatibility a__: Optional[Any] = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows a__: List[str] = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def UpperCamelCase__( UpperCamelCase__ : int )->str: try: import faiss # noqa except ImportError: A__ = unittest.skip('''test requires faiss''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : int )->Optional[Any]: try: import regex # noqa except ImportError: A__ = unittest.skip('''test requires regex''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : int )->Tuple: try: import elasticsearch # noqa except ImportError: A__ = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] )->Tuple: try: import sqlalchemy # noqa except ImportError: A__ = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : int )->Any: if not config.TORCH_AVAILABLE: A__ = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Tuple )->Tuple: if not config.TF_AVAILABLE: A__ = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Dict )->Union[str, Any]: if not config.JAX_AVAILABLE: A__ = unittest.skip('''test requires JAX''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : int )->Optional[int]: if not config.PIL_AVAILABLE: A__ = unittest.skip('''test requires Pillow''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Tuple )->Optional[int]: try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires transformers''' )(_lowerCamelCase ) else: return test_case def 
UpperCamelCase__( UpperCamelCase__ : Optional[Any] )->List[str]: try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase ) else: return test_case def UpperCamelCase__( UpperCamelCase__ : Dict )->Any: try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(_lowerCamelCase ) else: return test_case def UpperCamelCase__( UpperCamelCase__ : Optional[int] )->Optional[int]: def _require_spacy_model(UpperCamelCase__ : Optional[int] ): try: import spacy # noqa F401 spacy.load(_lowerCamelCase ) except ImportError: return unittest.skip('''test requires spacy''' )(_lowerCamelCase ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase ) else: return test_case return _require_spacy_model def UpperCamelCase__( UpperCamelCase__ : Dict )->Dict: try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(_lowerCamelCase ) else: return test_case def UpperCamelCase__( UpperCamelCase__ : List[str] )->Any: try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase ) else: return test_case def UpperCamelCase__( UpperCamelCase__ : Dict )->str: if not _run_slow_tests or _run_slow_tests == 0: A__ = unittest.skip('''test is slow''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : int )->List[str]: if not _run_local_tests or _run_local_tests == 0: A__ = unittest.skip('''test is local''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Optional[int] )->Union[str, Any]: if not _run_packaged_tests or _run_packaged_tests == 0: A__ = unittest.skip('''test is packaged''' )(_lowerCamelCase ) return test_case def UpperCamelCase__( UpperCamelCase__ : Tuple )->str: if not _run_remote_tests or _run_remote_tests == 0: A__ = unittest.skip('''test requires remote''' 
)(_lowerCamelCase ) return test_case def UpperCamelCase__( *UpperCamelCase__ : str )->str: def decorate(cls : str ): for name, fn in cls.__dict__.items(): if callable(_lowerCamelCase ) and name.startswith('''test''' ): for decorator in decorators: A__ = decorator(_lowerCamelCase ) setattr(cls , _lowerCamelCase , _lowerCamelCase ) return cls return decorate class SCREAMING_SNAKE_CASE__ ( __A ): pass class SCREAMING_SNAKE_CASE__ ( __A ): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 @contextmanager def UpperCamelCase__( UpperCamelCase__ : List[str]=OfflineSimulationMode.CONNECTION_FAILS , UpperCamelCase__ : int=1e-1_6 )->Tuple: A__ = requests.Session().request def timeout_request(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , **UpperCamelCase__ : str ): # Change the url to an invalid url so that the connection hangs A__ = "https://10.255.255.1" if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) A__ = timeout try: return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier A__ = url A__ = e.args[0] A__ = (max_retry_error.args[0].replace('''10.255.255.1''' , f"OfflineMock[{url}]" ),) A__ = (max_retry_error,) raise def raise_connection_error(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ): raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' , _lowerCamelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' , _lowerCamelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def UpperCamelCase__( *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple )->List[Any]: A__ = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir: try: os.chdir(_lowerCamelCase ) yield finally: os.chdir(_lowerCamelCase ) @contextmanager def UpperCamelCase__( )->Tuple: import gc gc.collect() A__ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def UpperCamelCase__( )->Any: import gc gc.collect() A__ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def UpperCamelCase__( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] )->Optional[int]: return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() def UpperCamelCase__( UpperCamelCase__ : str )->Tuple: import decorator from requests.exceptions import HTTPError def _wrapper(UpperCamelCase__ : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ): try: return func(*_lowerCamelCase , **_lowerCamelCase ) except HTTPError as err: if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ): pytest.xfail(str(_lowerCamelCase ) ) raise err return decorator.decorator(_wrapper , _lowerCamelCase ) class SCREAMING_SNAKE_CASE__ : def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): A__ = returncode A__ = stdout A__ = stderr async def UpperCamelCase__( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict )->Union[str, Any]: while True: A__ = await stream.readline() if line: callback(_lowerCamelCase ) else: break async def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Tuple=False )->Any: if echo: print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) ) A__ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) A__ = [] A__ = [] def tee(UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int]="" ): A__ = line.decode('''utf-8''' ).rstrip() sink.append(_lowerCamelCase ) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda UpperCamelCase__ : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ), _read_stream(p.stderr , lambda UpperCamelCase__ : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase ) def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : int=1_80 , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[Any]=True )->Optional[Any]: A__ = asyncio.get_event_loop() A__ = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) ) A__ = " ".join(_lowerCamelCase ) if result.returncode > 0: A__ = "\n".join(result.stderr ) raise RuntimeError( f"\'{cmd_str}\' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"\'{cmd_str}\' produced no output." 
) return result def UpperCamelCase__( )->List[Any]: A__ = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' ) A__ = re.sub(r'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M ) return int(_lowerCamelCase ) def UpperCamelCase__( )->Optional[Any]: A__ = 2_95_00 A__ = pytest_xdist_worker_id() return port + uniq_delta
193
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
0
# flake8: noqa # Lint as: python3 __UpperCamelCase : List[Any] = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
182
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = R''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class snake_case_ ( __A ): def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]: lowercase__ : str = max_length lowercase__ : Optional[int] = max_position_embeddings @add_start_docstrings(lowercase_ ) def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: lowercase__ : str = input_ids.shape[-1] lowercase__ : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). 
Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , ) lowercase__ : Optional[int] = start_length lowercase__ : str = max_new_tokens lowercase__ : Tuple = start_length + max_new_tokens @add_start_docstrings(lowercase_ ) def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool: return input_ids.shape[-1] >= self.max_length class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict: lowercase__ : List[str] = max_time lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(lowercase_ ) def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: return time.time() - self.initial_timestamp > self.max_time class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: return any(criteria(lowercase_ , lowercase_ ) for criteria in self ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: for stopping_criterium in self: if isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length elif isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length return None def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int): lowercase__ : Optional[int] = stopping_criteria.max_length lowercase__ : str = deepcopy(_lowerCamelCase) if 
stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase)) return new_stopping_criteria
87
0
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np __lowerCAmelCase : int =typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 __lowerCAmelCase : str =typing.Union[np.floataa, int, float] # noqa: UP007 def _UpperCamelCase ( lowercase__ , lowercase__ ): return np.sqrt(np.sum((np.asarray(_lowerCamelCase ) - np.asarray(_lowerCamelCase )) ** 2 ) ) def _UpperCamelCase ( lowercase__ , lowercase__ ): return sum((va - va) ** 2 for va, va in zip(_lowerCamelCase , _lowerCamelCase ) ) ** (1 / 2) if __name__ == "__main__": def _UpperCamelCase ( ): from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) ) benchmark()
9
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Any = [] lowercase__ : Optional[int] = [] lowercase__ : Tuple = [] for rt in rc.restypes: lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) lowercase__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = restype_atomaa_mask[protein_aatype] lowercase__ : List[Any] = residx_atomaa_mask lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device) for restype, restype_letter in enumerate(rc.restypes): lowercase__ : Tuple = rc.restype_atoa[restype_letter] lowercase__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase__ : Optional[int] = rc.atom_order[atom_name] lowercase__ : Tuple = 1 lowercase__ : Dict = restype_atomaa_mask[protein_aatype] lowercase__ : Any = residx_atomaa_mask return protein def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray) lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
87
0
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : str = 1.5 lowerCAmelCase__ : Any = int(factor * num_class_images ) lowerCAmelCase__ : Optional[Any] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_lowerCamelCase , aesthetic_weight=0.1 ) os.makedirs(f'{class_data_dir}/images' , exist_ok=_lowerCamelCase ) if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images: return while True: lowerCAmelCase__ : Dict = client.query(text=_lowerCamelCase ) if len(_lowerCamelCase ) >= factor * num_class_images or num_images > 1e4: break else: lowerCAmelCase__ : List[Any] = int(factor * num_images ) lowerCAmelCase__ : Any = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_lowerCamelCase , aesthetic_weight=0.1 , ) lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : Dict = 0 lowerCAmelCase__ : int = tqdm(desc='''downloading real regularization images''' , total=_lowerCamelCase ) with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open( f'{class_data_dir}/images.txt' , '''w''' ) as fa: while total < num_class_images: lowerCAmelCase__ : List[str] = class_images[count] count += 1 try: lowerCAmelCase__ : Union[str, Any] = requests.get(images['''url'''] ) if img.status_code == 2_00: lowerCAmelCase__ : List[str] = Image.open(BytesIO(img.content ) ) with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __SCREAMING_SNAKE_CASE ( ): 
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser('''''' , add_help=_lowerCamelCase ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=_lowerCamelCase , type=_lowerCamelCase ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=_lowerCamelCase , type=_lowerCamelCase ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=_lowerCamelCase ) return parser.parse_args() if __name__ == "__main__": __UpperCamelCase : Any = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
106
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class snake_case_ ( unittest.TestCase ): def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]: lowercase__ : Dict = parent lowercase__ : Dict = batch_size lowercase__ : Tuple = seq_length lowercase__ : Dict = is_training lowercase__ : Dict = use_attention_mask lowercase__ : Tuple = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : int = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Union[str, Any] 
= type_vocab_size lowercase__ : Dict = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : List[str] = num_choices lowercase__ : str = rescale_embeddings lowercase__ : Optional[Any] = attention_type lowercase__ : Optional[int] = use_bias lowercase__ : Optional[int] = block_size lowercase__ : str = num_random_blocks def __UpperCamelCase ( self : str ) -> Optional[Any]: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[int] = None if self.use_token_type_ids: lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __UpperCamelCase ( self : Union[str, Any] ) -> int: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class snake_case_ ( __A ,unittest.TestCase ): __A : Optional[int] = ( ( FlaxBigBirdForCausalLM, 
FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __A : List[str] = False __A : Any = False def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Optional[int] ) -> Dict: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : List[str] ) -> Any: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Tuple ) -> str: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: super().test_hidden_states_output() @slow def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[int]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : str ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase__ : Optional[Any] = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ 
: Tuple , lowercase_ : int=None , **lowercase_ : Dict ): return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ ) with self.subTest("JIT Enabled" ): lowercase__ : int = model_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
0
import argparse import collections import json import os import re import string import sys import numpy as np A : List[str] = re.compile(r'\b(a|an|the)\b', re.UNICODE) A : Union[str, Any] = None def UpperCamelCase ( ) -> Optional[Any]: """simple docstring""" lowercase__ = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=_lowerCamelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_lowerCamelCase , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase ( __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" lowercase__ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" def remove_articles(__magic_name__ : List[str] ): return ARTICLES_REGEX.sub(""" """ , _lowerCamelCase ) def white_space_fix(__magic_name__ : int ): return " ".join(text.split() ) def remove_punc(__magic_name__ : Tuple ): lowercase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude 
) def lower(__magic_name__ : Any ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) ) def UpperCamelCase ( __magic_name__ : str ) -> int: """simple docstring""" if not s: return [] return normalize_answer(_lowerCamelCase ).split() def UpperCamelCase ( __magic_name__ : Optional[int] , __magic_name__ : Any ) -> int: """simple docstring""" return int(normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase ) ) def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> str: """simple docstring""" lowercase__ = get_tokens(_lowerCamelCase ) lowercase__ = get_tokens(_lowerCamelCase ) lowercase__ = collections.Counter(_lowerCamelCase ) & collections.Counter(_lowerCamelCase ) lowercase__ = sum(common.values() ) if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowercase__ = 1.0 * num_same / len(_lowerCamelCase ) lowercase__ = 1.0 * num_same / len(_lowerCamelCase ) lowercase__ = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Any ) -> Dict: """simple docstring""" lowercase__ = {} lowercase__ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ = qa["id"] lowercase__ = [t for t in qa["answers"]["text"] if normalize_answer(_lowerCamelCase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowercase__ = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue lowercase__ = preds[qid] # Take max over all gold answers lowercase__ = max(compute_exact(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers ) lowercase__ = max(compute_fa(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase ( __magic_name__ : Union[str, 
Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Any ) -> Optional[Any]: """simple docstring""" lowercase__ = {} for qid, s in scores.items(): lowercase__ = na_probs[qid] > na_prob_thresh if pred_na: lowercase__ = float(not qid_to_has_ans[qid] ) else: lowercase__ = s return new_scores def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=None ) -> Tuple: """simple docstring""" if not qid_list: lowercase__ = len(_lowerCamelCase ) return collections.OrderedDict( [ ("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total), ("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: lowercase__ = len(_lowerCamelCase ) return collections.OrderedDict( [ ("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Any ) -> List[Any]: """simple docstring""" for k in new_eval: lowercase__ = new_eval[k] def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> Dict: """simple docstring""" plt.step(_lowerCamelCase , _lowerCamelCase , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(_lowerCamelCase , _lowerCamelCase , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(_lowerCamelCase ) plt.savefig(_lowerCamelCase ) plt.clf() def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None ) -> Tuple: """simple docstring""" lowercase__ = sorted(_lowerCamelCase , key=lambda __magic_name__ : na_probs[k] ) lowercase__ = 0.0 lowercase__ = 1.0 lowercase__ = 
0.0 lowercase__ = [1.0] lowercase__ = [0.0] lowercase__ = 0.0 for i, qid in enumerate(_lowerCamelCase ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowercase__ = true_pos / float(i + 1 ) lowercase__ = true_pos / float(_lowerCamelCase ) if i == len(_lowerCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_lowerCamelCase ) recalls.append(_lowerCamelCase ) if out_image: plot_pr_curve(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return {"ap": 1_0_0.0 * avg_prec} def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" if out_image_dir and not os.path.exists(_lowerCamelCase ): os.makedirs(_lowerCamelCase ) lowercase__ = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowercase__ = make_precision_recall_eval( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) lowercase__ = make_precision_recall_eval( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) lowercase__ = {k: float(_lowerCamelCase ) for k, v in qid_to_has_ans.items()} lowercase__ = make_precision_recall_eval( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)""" , ) merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_exact""" ) merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_f1""" ) merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_oracle""" ) def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" if not qid_list: return lowercase__ = [na_probs[k] for k in qid_list] lowercase__ = np.ones_like(_lowerCamelCase ) / float(len(_lowerCamelCase ) ) plt.hist(_lowerCamelCase , weights=_lowerCamelCase , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(_lowerCamelCase , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" lowercase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowercase__ = num_no_ans lowercase__ = cur_score lowercase__ = 0.0 lowercase__ = sorted(_lowerCamelCase , key=lambda __magic_name__ : na_probs[k] ) for i, qid in enumerate(_lowerCamelCase ): if qid not in scores: continue if qid_to_has_ans[qid]: lowercase__ = scores[qid] else: if preds[qid]: lowercase__ = -1 else: lowercase__ = 0 cur_score += diff if cur_score > best_score: lowercase__ = cur_score lowercase__ = na_probs[qid] return 1_0_0.0 * best_score / len(_lowerCamelCase ), best_thresh def UpperCamelCase ( __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowercase__ = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 
_lowerCamelCase ) lowercase__ = best_exact lowercase__ = exact_thresh lowercase__ = best_fa lowercase__ = fa_thresh def UpperCamelCase ( ) -> Dict: """simple docstring""" with open(OPTS.data_file ) as f: lowercase__ = json.load(_lowerCamelCase ) lowercase__ = dataset_json["data"] with open(OPTS.pred_file ) as f: lowercase__ = json.load(_lowerCamelCase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowercase__ = json.load(_lowerCamelCase ) else: lowercase__ = {k: 0.0 for k in preds} lowercase__ = make_qid_to_has_ans(_lowerCamelCase ) # maps qid to True/False lowercase__ = [k for k, v in qid_to_has_ans.items() if v] lowercase__ = [k for k, v in qid_to_has_ans.items() if not v] lowercase__ = get_raw_scores(_lowerCamelCase , _lowerCamelCase ) lowercase__ = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh ) lowercase__ = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh ) lowercase__ = make_eval_dict(_lowerCamelCase , _lowerCamelCase ) if has_ans_qids: lowercase__ = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase ) merge_eval(_lowerCamelCase , _lowerCamelCase , """HasAns""" ) if no_ans_qids: lowercase__ = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase ) merge_eval(_lowerCamelCase , _lowerCamelCase , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir ) histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(_lowerCamelCase , 
_lowerCamelCase ) else: print(json.dumps(_lowerCamelCase , indent=2 ) ) if __name__ == "__main__": A : List[str] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
305
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( _lowercase : Dict[str, torch.Tensor]) -> Union[str, Any]: """simple docstring""" a__ : Any = [] a__ : Optional[int] = [] a__ : Tuple = [] for rt in rc.restypes: a__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) a__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) a__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["""aatype"""].device , ) a__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["""aatype"""].device , ) a__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["""aatype"""].device , ) a__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein a__ : Dict = restype_atomaa_to_atomaa[protein_aatype] a__ : str = restype_atomaa_mask[protein_aatype] a__ : List[Any] = residx_atomaa_mask a__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back a__ : str = restype_atomaa_to_atomaa[protein_aatype] a__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask a__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device) for restype, restype_letter in enumerate(rc.restypes): a__ : Tuple = rc.restype_atoa[restype_letter] a__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: a__ : Optional[int] = rc.atom_order[atom_name] a__ : Tuple = 1 a__ : Dict = restype_atomaa_mask[protein_aatype] a__ : Any = residx_atomaa_mask return protein def lowerCAmelCase_ ( _lowercase : Dict[str, torch.Tensor]) -> Optional[Any]: """simple docstring""" a__ : Tuple = tree_map(lambda _lowercase: torch.tensor(_lowerCamelCase , device=batch["""aatype"""].device) , _lowerCamelCase , np.ndarray) a__ : List[str] = tensor_tree_map(lambda _lowercase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
170
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int): assert isinstance(_lowerCamelCase , _lowerCamelCase) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True]) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str): lowercase__ : Optional[int] = tmp_path / "cache" lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict): lowercase__ : List[Any] = tmp_path / "cache" lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : List[Any] = features.copy() if features else default_expected_features lowercase__ : List[Any] = ( Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) lowercase__ : Any = 
JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str]): lowercase__ : Optional[Any] = tmp_path / "cache" lowercase__ : Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowercase__ : List[Any] = features.copy() if features else default_expected_features lowercase__ : int = ( Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read() assert isinstance(_lowerCamelCase , _lowerCamelCase) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowercase__ : Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"} lowercase__ : str = features.copy() lowercase__ : str = ( Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) lowercase__ : Optional[int] = tmp_path / "cache" lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read() assert isinstance(_lowerCamelCase , _lowerCamelCase) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"]) def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]): lowercase__ : Union[str, Any] = tmp_path / "cache" lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list]) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int): if issubclass(_lowerCamelCase , _lowerCamelCase): lowercase__ : Tuple = jsonl_path elif issubclass(_lowerCamelCase , _lowerCamelCase): lowercase__ : str = [jsonl_path] lowercase__ : str = tmp_path / "cache" lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)): assert isinstance(_lowerCamelCase , _lowerCamelCase) for split in splits: lowercase__ : Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True]) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str): lowercase__ : List[str] = tmp_path / "cache" lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else 
assert_arrow_memory_doesnt_increase(): lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]): lowercase__ : str = tmp_path / "cache" lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Tuple = features.copy() if features else default_expected_features lowercase__ : Union[str, Any] = ( Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"]) def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple): if split: lowercase__ : Tuple = {split: jsonl_path} else: lowercase__ : Tuple = "train" lowercase__ : int = {"train": jsonl_path, "test": jsonl_path} lowercase__ : Dict = tmp_path / "cache" lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return json.load(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
Optional[int]): return [json.loads(_lowerCamelCase) for line in buffer] class snake_case_ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write() buffer.seek(0 ) lowercase__ : Optional[int] = load_json_function(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write() buffer.seek(0 ) lowercase__ : str = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , 
lines=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) lowercase__ : str = load_json_function(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) lowercase__ : Optional[Any] = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str: with pytest.raises(lowercase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any: lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(lowercase_ , lowercase_ , 
compression=lowercase_ ).write() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: lowercase__ : List[Any] = f.read() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: lowercase__ : str = f.read() assert exported_content == original_content
87
0
'''simple docstring''' def _UpperCamelCase ( __A ) -> int: '''simple docstring''' UpperCamelCase__ = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
80
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case_(ProcessorMixin):
    """Bundle a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor.

    The image processor optionally runs OCR to obtain words and bounding boxes;
    the tokenizer then turns words/boxes (and optional word labels) into model
    inputs. The original block inherited from the undefined name ``__A`` and
    bound all class attributes and helper methods to colliding names, leaving
    the class unusable — this restores the ``ProcessorMixin`` contract.
    """

    # Names consumed by ProcessorMixin to wire up the two sub-components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Store the image processor and tokenizer; accept the deprecated
        ``feature_extractor`` kwarg as an alias for ``image_processor``."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge pixel values
        into the tokenizer's output.

        Raises:
            ValueError: if ``boxes`` or ``word_labels`` are supplied while the
                image processor is configured to produce them itself via OCR.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
0
def a_(SCREAMING_SNAKE_CASE__: int = 2_000_000) -> int:
    """Project Euler #10: return the sum of all primes strictly below the argument.

    Uses a sieve of Eratosthenes. The original body stepped the inner sieve
    loop and the final summation with the undefined name ``_lowerCamelCase``
    (it should step by the prime ``i`` and iterate up to ``n``), raising a
    NameError on any call.
    """
    n = SCREAMING_SNAKE_CASE__
    # composite[i] becomes True once i is known to have a proper factor.
    composite = [False] * (n + 1)
    for i in range(2, int(n**0.5) + 1):
        if not composite[i]:
            # Start at i*i: smaller multiples were already marked by smaller primes.
            for j in range(i * i, n + 1, i):
                composite[j] = True
    # "below n": range(2, n) excludes n itself and skips non-primes 0 and 1.
    return sum(i for i in range(2, n) if not composite[i])


if __name__ == "__main__":
    # The original referenced the undefined name `solution` here.
    print(f"{a_() = }")
199
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


UpperCamelCase = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class snake_case_(BaseImageProcessor):
    """CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and mean/std normalization.

    The original block inherited from the undefined name ``__A``, gave every
    method the same colliding name, and declared each method's parameters
    under one repeated name (a SyntaxError) — this restores the standard
    ``BaseImageProcessor`` layout with real parameter names.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # Shortest-edge resizing must not force a square output.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a batch; per-call
        arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
87
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class _UpperCAmelCase ( __A ): SCREAMING_SNAKE_CASE_ : str = ["pixel_values"] def __init__( self : int , A : bool = True , A : Dict[str, int] = None , A : PILImageResampling = PILImageResampling.BICUBIC , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : bool = True , **A : Union[str, Any] , ) -> None: super().__init__(**lowercase_ ) lowercase_ : Tuple = size if size is not None else {"shortest_edge": 2_24} lowercase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase_ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} lowercase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='''crop_size''' ) lowercase_ : Dict = do_resize lowercase_ : List[Any] = size lowercase_ : int = resample lowercase_ : Union[str, Any] = do_center_crop lowercase_ : Optional[int] = crop_size lowercase_ : List[str] = do_rescale lowercase_ : int = rescale_factor lowercase_ : List[Any] = do_normalize lowercase_ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase_ : str = image_std if image_std is not None else OPENAI_CLIP_STD lowercase_ : Dict = do_convert_rgb def A ( 
self : Optional[Any] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray: lowercase_ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase_ : Dict = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def A ( self : int , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : int , ) -> np.ndarray: lowercase_ : Optional[Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_ ) def A ( self : Optional[Any] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[Any] , ) -> Any: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def A ( self : str , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : str , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def A ( self : Optional[Any] , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : int = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : bool = None , A : 
Optional[Union[str, TensorType]] = None , A : Optional[ChannelDimension] = ChannelDimension.FIRST , **A : Union[str, Any] , ) -> PIL.Image.Image: lowercase_ : int = do_resize if do_resize is not None else self.do_resize lowercase_ : Dict = size if size is not None else self.size lowercase_ : List[Any] = get_size_dict(lowercase_ , param_name='''size''' , default_to_square=lowercase_ ) lowercase_ : Dict = resample if resample is not None else self.resample lowercase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase_ : Dict = crop_size if crop_size is not None else self.crop_size lowercase_ : List[str] = get_size_dict(lowercase_ , param_name='''crop_size''' , default_to_square=lowercase_ ) lowercase_ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : int = image_mean if image_mean is not None else self.image_mean lowercase_ : List[str] = image_std if image_std is not None else self.image_std lowercase_ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase_ : Union[str, Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase_ : Dict = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. lowercase_ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowercase_ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: lowercase_ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: lowercase_ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowercase_ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowercase_ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowercase_ : List[str] = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Submodule -> exported-names mapping handed to `_LazyModule`. The original
# bound both this dict and the export list to the same throwaway name, so
# `_import_structure` was undefined at the `_LazyModule` call (NameError on
# import); it also imported from the misspelled module `tokenization_gpt_swa`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Submodule -> exported-names mapping handed to `_LazyModule`. The original
# assigned this dict and the later export lists to throwaway names, so
# `_import_structure` was undefined at the `_LazyModule` call (NameError on
# import).
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
276
# Project Euler #145: how many reversible numbers are there below one billion?
# n is "reversible" if n + reverse(n) consists entirely of odd digits (and n
# has no leading/trailing zero). The original bound both digit tables to the
# same name, declared all four parameters of the counter under one repeated
# name (a SyntaxError), and called the undefined names `reversible_numbers`
# and `solution`.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given total ``length``.

    Digits are chosen pairwise from the outside in; ``remaining_length`` is how
    many positions are still unset, ``remainder`` is the carry accumulated so
    far, and ``digits`` is the scratch buffer of chosen digits.
    """
    if remaining_length == 0:
        # Leading/trailing zero would change the reversed number's length.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, propagating the carry.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number contributes twice.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have the opposite parity of the running sum.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10 ** ``max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
0
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class a_(unittest.TestCase):
    """Tests for TextStreamer / TextIteratorStreamer.

    The original block passed the undefined name ``lowercase_`` everywhere a
    concrete argument was needed (device, ``do_sample`` flag, tokenizer,
    input ids, expected exception, ...) and gave all five test methods the
    same colliding name — this restores working, distinct tests.
    """

    def test_text_streamer_matches_non_streaming(self):
        # Streaming to stdout must produce exactly the non-streaming greedy text.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        # Consuming the iterator streamer must yield the same greedy text.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        # With skip_prompt=True only the newly generated tokens are streamed.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        # A consumer that polls faster than the producer must hit the timeout.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
309
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    """TER metric, backed by sacrebleu's TER implementation (TERCOM-inspired)."""

    def _info(self):
        # sacrebleu only gained a usable TER implementation in 1.4.12.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute corpus-level TER for `predictions` against transposed `references`.

        Returns a dict with 'score', 'num_edits' and 'ref_length'.
        Raises ValueError when predictions don't all have the same number of references.
        """
        # Every prediction must have the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects references transposed: one stream per reference position.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        """Integration check of DiT fine-tuned on RVL-CDIP: logits shape and a slice of values."""
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
193
def perfect_cube(n: int) -> bool:
    """
    Return True iff ``n`` is a perfect cube (including 0 and negative cubes).

    The previous float test ``(n ** (1/3)) ** 3 == n`` fails on exact cubes
    because of floating-point error (e.g. ``27 ** (1/3) == 3.0000000000000004``),
    so instead round the real cube root to the nearest integer and verify by
    cubing it back.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    >>> perfect_cube(-8)
    True
    """
    root = round(abs(n) ** (1 / 3))
    return root * root * root == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    """Build and return the `accelerate config` argument parser with its subcommands attached."""
    # Shared parent parser: no help of its own, no prefix abbreviation.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    """CLI entry point: parse args and dispatch to the selected subcommand's `func`."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit non-zero.
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
182
def change_contrast(img, level: int):
    """
    Return a copy of ``img`` with its contrast adjusted by ``level``.

    Uses the standard contrast-correction factor
    ``F = 259 * (level + 255) / (255 * (259 - level))`` and remaps every band
    value ``c`` to ``128 + F * (c - 128)``, so mid-gray (128) is the fixed
    point and ``level == 0`` leaves the image unchanged.

    :param img: a ``PIL.Image.Image`` (any object exposing ``.point``)
    :param level: contrast level, expected in [-255, 255]
    :return: the transformed image produced by ``img.point``
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Remap one band value around the mid-gray fixed point.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Pillow is only needed when running as a script; importing it lazily keeps
    # the module importable in environments without Pillow installed.
    from PIL import Image

    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
87
0
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, handling the torch<1.11 export API.

    PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    arguments in v1.11, so we branch on the torch version for backwards compatibility.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert the VAE decoder of a `diffusers` checkpoint at `model_path` to ONNX.

    Raises ValueError when `fp16` is requested without a CUDA device.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode

    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    # NOTE: argparse stores the flag under `fp16`; the previous `args.fpaa` was undefined.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
9
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache of keys backed by a deque + membership set."""

    dq_store: deque[T]  # Cache store of keys, most recent at the left
    key_reference: set[T]  # O(1) membership check for keys currently cached
    _MAX_CAPACITY: int = 10  # Default maximum capacity of the cache

    def __init__(self, n: int) -> None:
        """Create a cache holding at most `n` keys (0 means effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            # Per-instance capacity; the eviction check below must read this,
            # not the class-level default, or small caches never evict.
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to key `x`, evicting the least-recent key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front.
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()

print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
0
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : str = "ylacombe/bark-small" lowerCAmelCase__ : Optional[int] = tempfile.mkdtemp() lowerCAmelCase__ : str = "en_speaker_1" lowerCAmelCase__ : List[str] = "This is a test string" lowerCAmelCase__ : Dict = "speaker_embeddings_path.json" lowerCAmelCase__ : List[str] = "speaker_embeddings" def __lowerCAmelCase ( self : Optional[int] ,**lowercase_ : Union[str, Any] ): return AutoTokenizer.from_pretrained(self.checkpoint ,**lowercase_ ) def __lowerCAmelCase ( self : List[str] ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : List[Any] = self.get_tokenizer() lowerCAmelCase__ : Optional[int] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) lowerCAmelCase__ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' ) lowerCAmelCase__ : Any = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def __lowerCAmelCase ( self : List[Any] 
): lowerCAmelCase__ : int = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) lowerCAmelCase__ : List[Any] = 3_5 lowerCAmelCase__ : Any = 2 lowerCAmelCase__ : List[str] = 8 lowerCAmelCase__ : int = { "semantic_prompt": np.ones(lowercase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowerCAmelCase__ : List[str] = processor(text=self.input_string ,voice_preset=lowercase_ ) lowerCAmelCase__ : Union[str, Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowercase_ ,np.array([] ) ).tolist() ) # test loading voice preset from npz file lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,'''file.npz''' ) np.savez(lowercase_ ,**lowercase_ ) lowerCAmelCase__ : Tuple = processor(text=self.input_string ,voice_preset=lowercase_ ) lowerCAmelCase__ : Dict = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowercase_ ,np.array([] ) ).tolist() ) # test loading voice preset from the hub lowerCAmelCase__ : Any = processor(text=self.input_string ,voice_preset=self.voice_preset ) def __lowerCAmelCase ( self : Optional[int] ): lowerCAmelCase__ : Tuple = self.get_tokenizer() lowerCAmelCase__ : Optional[int] = BarkProcessor(tokenizer=lowercase_ ) lowerCAmelCase__ : Dict = processor(text=self.input_string ) lowerCAmelCase__ : int = tokenizer( self.input_string ,padding='''max_length''' ,max_length=2_5_6 ,add_special_tokens=lowercase_ ,return_attention_mask=lowercase_ ,return_token_type_ids=lowercase_ ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
106
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for ConvBERT models; defaults mirror YituTech/conv-bert-base."""

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec; multiple-choice tasks carry an extra `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
87
0
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """
    Decorator that wraps `method` so that, before it runs, any accelerate
    `_hf_hook.pre_forward` registered on the instance is triggered (e.g. to
    move offloaded weights back onto the execution device).

    Returns `method` unchanged when accelerate is unavailable or older than
    0.17.0, since the hook API is only usable from that version on.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
305
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Load a TensorFlow BERT checkpoint into a PyTorch model and save its state dict."""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC in any of the
# following settings (with the same script): single CPU or GPU, multi
# GPU (PyTorch distributed), (multi) TPU, fp16/fp32.
# New additions from the base script are marked with  # New Code #
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train bert-base-cased on MRPC with optional experiment tracking."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weight init)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # The device placement must happen before the optimizer is created, or TPU training breaks.
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything; unpack in the same order the objects were passed in.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, call `accelerator.end_training()` to close all open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
170
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config

# `importlib.metadata` only gained the API we need in 3.8; use the backport before that.
if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean env flag `key` (yes/no style, via `strtobool`); return `default` when unset.

    Raises ValueError if the variable is set to something strtobool cannot parse.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


# Suite-level switches; the names below are referenced by the slow/local/packaged/remote decorators.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless the `regex` package is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available (per datasets' config probe)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy AND the given spacy model are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            # spacy raises OSError when the model itself is missing.
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless RUN_SLOW is enabled in the environment."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless RUN_LOCAL is enabled in the environment."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless RUN_PACKAGED is enabled in the environment."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless RUN_REMOTE is enabled in the environment."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every given decorator to each `test*` method of the class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    """Raised in offline simulation when a request is made without a timeout."""

    pass


class OfflineSimulationMode(Enum):
    # How `offline()` below simulates the absence of a network connection.
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context.

    CONNECTION_FAILS patches Session.send to raise ConnectionError immediately;
    CONNECTION_TIMES_OUT reroutes requests to an unroutable IP so they time out after
    `timeout` seconds; HF_DATASETS_OFFLINE_SET_TO_1 flips the datasets offline flag.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        # NOTE(review): the patched-in value was lost in the mangled source; True matches the mode name — confirm.
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the context body with the CWD set to a fresh temporary directory; always restore it."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that pyarrow's allocated memory grows across the context body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that pyarrow's allocated memory does not grow across the context body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]): return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() def lowercase_ ( _lowerCamelCase : str): import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict): try: return func(*_lowerCamelCase , **_lowerCamelCase) except HTTPError as err: if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"): pytest.xfail(str(_lowerCamelCase)) raise err return decorator.decorator(_wrapper , _lowerCamelCase) class snake_case_ : def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]: lowercase__ : Tuple = returncode lowercase__ : int = stdout lowercase__ : Union[str, Any] = stderr async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict): while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : str = [] lowercase__ : List[str] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")), _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True): lowercase__ : Any = asyncio.get_event_loop() lowercase__ : Tuple = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : int = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Any = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''') return result def lowercase_ ( ): lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0") lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
0
"""Deprecated shim kept for backward compatibility.

Importing this script only re-exports StableDiffusionImg2ImgPipeline from diffusers
and emits a warning pointing users at the direct import.
"""
import warnings

# Fix: the re-exported class name was garbled to `StableDiffusionImgaImgPipeline`,
# which made this module raise ImportError; the warning text below names the real class.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
80
import argparse
import json
from collections import OrderedDict

import torch


def embeddings(idx):
    """(HF key, original key) rename pairs for stage `idx`'s patch-embedding weights."""
    embed = []
    hf = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig = f"stage{idx}.patch_embed"
    embed.append((f"{hf}.projection.weight", f"{orig}.proj.weight"))
    embed.append((f"{hf}.projection.bias", f"{orig}.proj.bias"))
    embed.append((f"{hf}.normalization.weight", f"{orig}.norm.weight"))
    embed.append((f"{hf}.normalization.bias", f"{orig}.norm.bias"))
    return embed


def attention(idx, cnt):
    """(HF key, original key) rename pairs for transformer block `cnt` of stage `idx`.

    Returns 34 pairs in the exact order of the original hand-unrolled script:
    conv q/k/v projections (conv weight + 5 batch-norm entries each), linear q/k/v
    projections, attention output projection, MLP fc1/fc2, and the two layer norms.
    """
    hf_block = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_block = f"stage{idx}.blocks.{cnt}"
    attention_weights = []

    # Convolutional projections: one conv weight plus the batch-norm params/buffers each.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        hf_conv = f"{hf_block}.attention.attention.convolution_projection_{long_name}.convolution_projection"
        orig_conv = f"{orig_block}.attn.conv_proj_{short_name}"
        attention_weights.append((f"{hf_conv}.convolution.weight", f"{orig_conv}.conv.weight"))
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{hf_conv}.normalization.{param}", f"{orig_conv}.bn.{param}"))

    # Linear q/k/v projections.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_block}.attention.attention.projection_{long_name}.{param}",
                    f"{orig_block}.attn.proj_{short_name}.{param}",
                )
            )

    # Attention output projection, MLP, and the two layer norms.
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_block}.{hf_suffix}.{param}", f"{orig_block}.{orig_suffix}.{param}"))

    return attention_weights


def cls_token(idx):
    """Rename pair for the CLS token; the original checkpoint stores it under stage2."""
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Rename pairs for the final layer norm and classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint at `cvt_file_name` into a HF CvtForImageClassification
    model and save model + image processor under `pytorch_dump_folder_path`."""
    # Deferred imports: only the conversion itself needs the Hub and transformers;
    # this keeps the key-mapping helpers importable without those packages.
    from huggingface_hub import cached_download, hf_hub_url
    from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the assignment target for `image_size` was lost in the source;
    # setting the processor's shortest edge matches the upstream script — confirm.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    # First tuple element is the HF key, second is the key in the original checkpoint.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """SafeLoader that rejects mappings containing duplicate keys instead of silently
    keeping the last one."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; use their tuple form so they can go in a Counter.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        # Name must stay `construct_mapping` — it overrides the SafeLoader hook.
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml front-matter, body); yaml part is None when absent."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset metadata stored as the YAML front-matter block of a dataset README."""

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of the README at `path` (empty when absent)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata into the README at `path`, preserving any existing body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return `readme_content` with its YAML block replaced by this metadata."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into a DatasetMetadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, converting dashed fields back to their YAML spelling."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task categories; values are left empty here (task ids are validated elsewhere).
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
199
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from 
.configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from __future__ import annotations def lowercase ( __snake_case : list ): if not nums: raise ValueError('''List is empty''' ) return sum(_lowerCamelCase ) / len(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
33
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case_ ( __A ,unittest.TestCase ): __A : Union[str, Any] = LEDTokenizer __A : Union[str, Any] = LEDTokenizerFast __A : Optional[Any] = True def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().setUp() lowercase__ : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple: return "lower newer", "lower newer" @cached_property def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: return 
LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def __UpperCamelCase ( self : Tuple ) -> int: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def __UpperCamelCase ( self : int ) -> List[Any]: lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Tuple: lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowercase_ ) self.assertIn("attention_mask" , lowercase_ ) self.assertNotIn("labels" , lowercase_ ) self.assertNotIn("decoder_attention_mask" , lowercase_ ) @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Any: lowercase__ : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = tokenizer( ["I am a small frog" * 10_24, "I am a 
small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Any: lowercase__ : Union[str, Any] = ["A long paragraph for summarization."] lowercase__ : List[Any] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" ) lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" ) lowercase__ : Optional[int] = inputs["input_ids"] lowercase__ : str = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = ["Summary of the text.", "Another summary."] lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ ) lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]] lowercase__ : Any = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ ) def __UpperCamelCase ( self : int ) -> Union[str, Any]: pass def __UpperCamelCase ( self : int ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : 
List[Any] = "A, <mask> AllenNLP sentence." lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
87
0
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class A__ ( __A ): @slow @require_torch def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : List[Any] =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) _a : str =BertTokenizer.from_pretrained("""bert-base-uncased""" ) _a : Tuple =bertabert.config.encoder.vocab_size _a : Tuple =tokenizer.sep_token_id _a : Dict =tokenizer.cls_token_id _a : Optional[int] =1_2_8 _a : List[str] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) _a : Any =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) _a : List[Any] =train_dataset.select(range(3_2 ) ) _a : str =val_dataset.select(range(1_6 ) ) _a : Union[str, Any] =4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE :List[str] ): # Tokenizer will automatically set [BOS] <text> [EOS] _a : Union[str, Any] =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowercase_ , max_length=5_1_2 ) _a : str =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowercase_ , max_length=1_2_8 ) _a : Tuple =inputs.input_ids _a : int =inputs.attention_mask _a : Optional[int] =outputs.input_ids _a : Union[str, Any] =outputs.input_ids.copy() _a : str =[ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] _a : Union[str, Any] =outputs.attention_mask assert all(len(lowercase_ ) == 5_1_2 for x in inputs.input_ids ) assert all(len(lowercase_ ) == 1_2_8 for x in outputs.input_ids ) return batch def _compute_metrics(SCREAMING_SNAKE_CASE :List[str] ): _a : List[str] =pred.label_ids _a : int 
=pred.predictions # all unnecessary tokens are removed _a : Tuple =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) _a : List[str] =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) _a : Optional[int] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ ) return {"accuracy": accuracy} # map train dataset _a : str =train_dataset.map( _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset _a : Dict =val_dataset.map( _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) _a : str =self.get_auto_remove_tmp_dir() _a : Any =SeqaSeqTrainingArguments( output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy="""steps""" , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _a : Union[str, Any] =SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , ) # start training trainer.train()
276
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
0
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class a_ (__A ): __lowerCAmelCase : List[Any] = (UnCLIPScheduler,) def __UpperCamelCase ( self , **snake_case_ ): _lowerCAmelCase : int = { "num_train_timesteps": 1_0_0_0, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**lowercase_ ) return config def __UpperCamelCase ( self ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def __UpperCamelCase ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase_ ) def __UpperCamelCase ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase_ ) def __UpperCamelCase ( self ): for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=lowercase_ ) def __UpperCamelCase ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase_ ) def __UpperCamelCase ( self ): for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase_ , prev_timestep=lowercase_ ) def __UpperCamelCase ( self ): _lowerCAmelCase : Dict = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config(variance_type="""fixed_small_log""" ) _lowerCAmelCase : Optional[int] = scheduler_class(**lowercase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1E-5 def __UpperCamelCase ( self ): _lowerCAmelCase : Dict = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = 
self.get_scheduler_config(variance_type="""learned_range""" ) _lowerCAmelCase : Any = scheduler_class(**lowercase_ ) _lowerCAmelCase : Tuple = 0.5 assert scheduler._get_variance(1 , predicted_variance=lowercase_ ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(4_8_7 , predicted_variance=lowercase_ ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(9_9_9 , predicted_variance=lowercase_ ) - -0.001_0011 < 1E-5 def __UpperCamelCase ( self ): _lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCAmelCase : List[Any] = self.get_scheduler_config() _lowerCAmelCase : Optional[Any] = scheduler_class(**lowercase_ ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps _lowerCAmelCase : List[Any] = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter _lowerCAmelCase : int = torch.manual_seed(0 ) for i, t in enumerate(lowercase_ ): # 1. predict noise residual _lowerCAmelCase : Optional[int] = model(lowercase_ , lowercase_ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample _lowerCAmelCase : List[str] = pred_prev_sample _lowerCAmelCase : List[str] = torch.sum(torch.abs(lowercase_ ) ) _lowerCAmelCase : List[Any] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Any = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(2_5 ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps _lowerCAmelCase : Tuple = self.dummy_model() _lowerCAmelCase : int = self.dummy_sample_deter _lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) for i, t in enumerate(lowercase_ ): # 1. 
predict noise residual _lowerCAmelCase : Tuple = model(lowercase_ , lowercase_ ) if i + 1 == timesteps.shape[0]: _lowerCAmelCase : int = None else: _lowerCAmelCase : Union[str, Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : str = scheduler.step( lowercase_ , lowercase_ , lowercase_ , prev_timestep=lowercase_ , generator=lowercase_ ).prev_sample _lowerCAmelCase : str = pred_prev_sample _lowerCAmelCase : Dict = torch.sum(torch.abs(lowercase_ ) ) _lowerCAmelCase : List[Any] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def __UpperCamelCase ( self ): pass def __UpperCamelCase ( self ): pass
309
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class snake_case_ ( unittest.TestCase ): @require_torch def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Union[str, Any] = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) lowercase__ : List[str] = load_dataset("ashraq/esc50" ) lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"] lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : str ) -> Optional[int]: pass @slow @require_torch def __UpperCamelCase ( self : List[str] ) -> int: lowercase__ : Tuple = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" ) lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"] lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ] , ) lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) lowercase__ : Tuple = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) 
self.assertEqual( nested_simplify(lowercase_ ) , [ [ {"score": 0.9_99, "label": "Sound of a dog"}, {"score": 0.0_01, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: pass
87
0
import operator def UpperCamelCase__( UpperCamelCase__ : list , UpperCamelCase__ : bool = False , UpperCamelCase__ : list | None = None )->Dict: A__ = operator.lt if reverse else operator.gt A__ = solution or [] if not arr: return solution A__ = [arr.pop(0 )] for i, item in enumerate(_lowerCamelCase ): if _operator(_lowerCamelCase , sublist[-1] ): sublist.append(_lowerCamelCase ) arr.pop(_lowerCamelCase ) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase ) else: while sublist: A__ = sublist.pop(0 ) for i, xx in enumerate(_lowerCamelCase ): if not _operator(_lowerCamelCase , _lowerCamelCase ): solution.insert(_lowerCamelCase , _lowerCamelCase ) break else: solution.append(_lowerCamelCase ) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
193
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Stores the backbone configuration plus the decode/auxiliary head
    hyper-parameters. Defaults match the upstream `upernet` architecture.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # A plain dict is converted to the proper config class via its
            # declared `model_type`.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
182
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for criteria that decide when generation stops."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop once the sequence reaches `max_length` tokens (prompt included)."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        # Warn (once) when generation is about to run past the model's
        # position-embedding capacity without having hit `max_length`.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop after `max_new_tokens` tokens beyond `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop once more than `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; triggers when any member criterion triggers."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        # First length-based criterion wins; None when no length limit is set.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to carry a length limit.

    Warns when the list already encodes a different `max_length`; appends a
    `MaxLengthCriteria` when it encodes none.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
87
0
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximum total value achievable from ``n`` items
    with values ``vl`` and weights ``wt`` under capacity ``w`` (items may be
    taken fractionally).

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Greedy: order items by decreasing value density vl/wt.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # k = number of whole items that fit within capacity w.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
9
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37).

    Adds per-residue index maps and existence masks between the compact
    atom14 representation and the full atom37 representation to `protein`
    (mutated in place and returned).

    NOTE(review): identifiers were reconstructed from a corrupted source in
    which digits in names were replaced by 'a' (e.g. `restype_atoa` ->
    `restype_1to3`, `torch.floataa` -> `torch.float32`); the output dict keys
    follow the standard OpenFold names — verify against `residue_constants`.
    """
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atom14_to_atom37 = torch.tensor(restype_atom14_to_atom37_list, dtype=torch.int32, device=device)
    restype_atom37_to_atom14 = torch.tensor(restype_atom37_to_atom14_list, dtype=torch.int32, device=device)
    restype_atom14_mask = torch.tensor(restype_atom14_mask_list, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # Create the mapping for (residx, atom14) --> atom37, i.e. an array with
    # shape (num_res, 14) containing the atom37 indices for this protein.
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # Create the gather indices for mapping back.
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # Create the corresponding mask over all 37 atom slots.
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy wrapper: run `make_atom14_masks` on tensors, return NumPy arrays."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
87
0
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def __SCREAMING_SNAKE_CASE ( A_ ): return EnvironmentCommand() class SCREAMING_SNAKE_CASE ( __A ): """simple docstring""" @staticmethod def __lowerCAmelCase ( lowercase_ : ArgumentParser ): lowerCAmelCase__ : List[Any] = parser.add_parser('''env''' ) download_parser.set_defaults(func=lowercase_ ) def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ : Optional[int] = huggingface_hub.__version__ lowerCAmelCase__ : List[Any] = "not installed" lowerCAmelCase__ : Optional[Any] = "NA" if is_torch_available(): import torch lowerCAmelCase__ : Optional[Any] = torch.__version__ lowerCAmelCase__ : Optional[Any] = torch.cuda.is_available() lowerCAmelCase__ : Union[str, Any] = "not installed" if is_transformers_available(): import transformers lowerCAmelCase__ : Any = transformers.__version__ lowerCAmelCase__ : str = "not installed" if is_accelerate_available(): import accelerate lowerCAmelCase__ : Optional[int] = accelerate.__version__ lowerCAmelCase__ : List[str] = "not installed" if is_xformers_available(): import xformers lowerCAmelCase__ : Optional[int] = xformers.__version__ lowerCAmelCase__ : List[str] = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})', "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(lowercase_ 
) ) return info @staticmethod def __lowerCAmelCase ( lowercase_ : str ): return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
106
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds small BigBird configs/inputs for the common Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
87
0
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    """Seed python, numpy and torch RNGs for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential moving average (EMA) of a set of model parameters."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        """Load an EMA model previously written with `save_pretrained`."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Persist the EMA weights (and EMA hyper-parameters) via `model_cls`."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # Shadow params are saved as the model weights, not as config entries.
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards `parameters` by one EMA step."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Gather the (possibly partitioned) parameter before updating.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the shadow parameters into `parameters`."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to `device` (dtype only for float params)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return a serializable snapshot of the EMA state."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily save `parameters` so they can be `restore()`d later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore parameters previously saved with `store`."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load and validate an EMA state produced by `state_dict`."""
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
305
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it defines.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""Project Euler 551: a(n+1) = a(n) + digitsum(a(n)), a(1) = 1; find a(10**15).

Terms are written as a(i) = b * 10**k + c; jumps over many terms are cached
in `memo` keyed by digitsum(b) and c.
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    """Advance `a_i` (little-endian digit list, mutated in place) from term i
    towards term n, jumping via cached differences where possible.

    Returns (diff, terms_jumped): total value added and number of terms
    advanced before c overflows 10**k or term n is reached.
    """
    # ds_b = digitsum(b), c = value of the low k digits.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Advance term by term until term n or until the low-k-digit part
    overflows; returns (diff, terms_advanced)."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        # A surviving carry means the low k digits overflowed; stop here.
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit list starting at index k (in place)."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # Reassemble the little-endian digit list into an integer.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
170
"""Tests for JSON dataset reading/writing (``datasets.io.json``).

Relies on conftest fixtures: ``jsonl_path``, ``jsonl_312_path``, ``dataset``,
``shared_datadir`` and pytest's ``tmp_path`` / ``tmp_path_factory``.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    # Shared sanity checks for a Dataset loaded from the 4-row jsonl fixture.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    # Shared sanity checks for a DatasetDict loaded from the jsonl fixture.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
87
0
"""Multilingual CLIP text tower: an XLM-RoBERTa encoder followed by a linear
projection into the CLIP image embedding space."""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # transformerDimensions: hidden size of the text transformer
        # numDims: target dimensionality (the CLIP image embedding size)
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MCLIPModel(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, per-token hidden states).

        The pooled embedding is the attention-mask-weighted mean of the last
        hidden states, projected into the CLIP space.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Masked mean pool over the sequence dimension.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
80
"""Processor that combines a LayoutLMv3 image processor and tokenizer into a
single callable preparing inputs for LayoutLMv3 models."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """Wraps ``LayoutLMv3ImageProcessor`` (optionally running OCR) and a
    LayoutLMv3 tokenizer; produces token ids, bounding boxes and pixel values."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (and optionally its OCR) then the tokenizer.

        When ``apply_ocr`` is on, words and boxes come from the OCR output, so
        the caller must not supply ``boxes`` or ``word_labels`` themselves.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its docs for details.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its docs for details.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
0
"""Precompute and pickle per-example sequence lengths for dynamic batching."""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching.

    Writes one pickle per split (``train``/``val``) to the dataset's
    ``len_file`` path.  If ``consider_target`` is False only source lengths
    are stored.
    """
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # token counts = non-pad entries per row
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
199
"""Image processor for CLIP: resize (shortest edge), center crop, rescale,
normalize and optional RGB conversion."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor.

    Defaults follow the original OpenAI CLIP preprocessing: shortest edge 224,
    center crop 224x224, 1/255 rescale and CLIP mean/std normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that the shortest edge equals ``size["shortest_edge"]``,
        preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the full CLIP preprocessing pipeline to one image or a batch.

        Any argument left as None falls back to the value set at init time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
87
0
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( __snake_case : Features ): lowercase_ : List[Any] = np.inf def set_batch_size(__snake_case : FeatureType ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): lowercase_ : Any = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): lowercase_ : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": lowercase_ : Dict = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class _UpperCAmelCase ( __A ): def __init__( self : List[str] , A : NestedDataStructureLike[PathLike] , A : Optional[NamedSplit] = None , A : Optional[Features] = None , A : str = None , A : bool = False , A : bool = False , A : Optional[int] = None , **A : Union[str, Any] , ) -> Union[str, Any]: super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) lowercase_ : List[str] = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths} lowercase_ : Optional[int] = _PACKAGED_DATASETS_MODULES["parquet"][1] lowercase_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , 
data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def A ( self : Tuple ) -> Dict: # Build iterable dataset if self.streaming: lowercase_ : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase_ : Any = None lowercase_ : int = None lowercase_ : Dict = None lowercase_ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) lowercase_ : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : def __init__( self : str , A : Dataset , A : Union[PathLike, BinaryIO] , A : Optional[int] = None , **A : Optional[Any] , ) -> int: lowercase_ : Dict = dataset lowercase_ : List[Any] = path_or_buf lowercase_ : Any = batch_size or get_writer_batch_size(dataset.features ) lowercase_ : Any = parquet_writer_kwargs def A ( self : List[Any] ) -> int: lowercase_ : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: lowercase_ : str = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs ) else: lowercase_ : Tuple = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs ) return written def A ( self : Union[str, Any] , A : BinaryIO , A : int , **A : Optional[int] ) -> int: lowercase_ : List[Any] = 0 lowercase_ : List[Any] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_ ) lowercase_ : List[Any] = self.dataset.features.arrow_schema lowercase_ : Optional[Any] = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , lowercase_ ) , unit='''ba''' , disable=not 
logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): lowercase_ : Any = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_ ) written += batch.nbytes writer.close() return written
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0
'''simple docstring''' A__: List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__: str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__: Tuple = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> Any: assert len(str(_lowerCamelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: _a : Tuple =year // 100 _a : List[str] =(5 * (century % 4) + 2) % 7 _a : str =year % 100 _a : str =centurian % 12 _a : Optional[int] =( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 _a : int =( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) else DOOMSDAY_LEAP[month - 1] ) _a : Union[str, Any] =(dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
276
UpperCamelCase = [0, 2, 4, 6, 8] UpperCamelCase = [1, 3, 5, 7, 9] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase__ : str = 0 for digit in range(10): lowercase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase) return result lowercase__ : Dict = 0 for digita in range(10): lowercase__ : int = digita if (remainder + digita) % 2 == 0: lowercase__ : Optional[Any] = ODD_DIGITS else: lowercase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowercase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def lowercase_ ( _lowerCamelCase : int = 9): lowercase__ : Tuple = 0 for length in range(1 , max_power + 1): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase) return result if __name__ == "__main__": print(f"{solution() = }")
87
0
'''simple docstring''' def _UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] ) -> Union[str, Any]: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : list[int] , _lowerCamelCase : int ) -> Dict: # Base Case if curr_ind == len(_lowerCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(_lowerCamelCase ) ): if valid_connection(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # Insert current vertex into path as next transition _lowerCAmelCase : Any = next_ver # Validate created path if util_hamilton_cycle(_lowerCamelCase , _lowerCamelCase , curr_ind + 1 ): return True # Backtrack _lowerCAmelCase : Optional[Any] = -1 return False def _UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : int = 0 ) -> int: _lowerCAmelCase : Optional[Any] = [-1] * (len(_lowerCamelCase ) + 1) # initialize start and end of path with starting index _lowerCAmelCase : List[Any] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(_lowerCamelCase , _lowerCamelCase , 1 ) else []
309
# NOTE(review): machine-mangled TER (Translation Edit Rate) metric module in
# Hugging Face ``datasets`` style, kept byte-for-byte.  The module constants
# (_CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION) were all rebound to
# ``UpperCamelCase`` while the decorator/class still reference the original
# names, and both Metric methods share one mangled name with duplicated
# ``lowercase_`` parameters — the text is not runnable as-is and a
# behavior-preserving rewrite is unsafe; restore from the upstream
# ``metrics/ter/ter.py`` — TODO confirm.
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCamelCase = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' UpperCamelCase = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' UpperCamelCase = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any: lowercase__ : Optional[int] = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )] lowercase__ : str = TER( normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , ) lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
0
# NOTE(review): machine-mangled diffusers spectrogram-diffusion pipeline,
# kept byte-for-byte.  All five ``__init__``/method parameters were renamed
# ``__lowerCamelCase`` (duplicated per signature) and locals collapsed onto
# ``A__`` while reference sites kept the original names, so the text is not
# runnable and the denoising loop is too order-sensitive for a safe rewrite
# from this view — TODO confirm against diffusers'
# ``pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py``.
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder a__: Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name a__: List[str] = 256 class SCREAMING_SNAKE_CASE__ ( __A ): __SCREAMING_SNAKE_CASE = ["melgan"] def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,): super().__init__() # From MELGAN A__ = math.log(1E-5 ) # Matches MelGAN training. A__ = 4.0 # Largest value for most examples A__ = 128 self.register_modules( notes_encoder=lowercase_,continuous_encoder=lowercase_,decoder=lowercase_,scheduler=lowercase_,melgan=lowercase_,) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=(-1.0, 1.0),__lowerCamelCase=False ): A__ = output_range if clip: A__ = torch.clip(lowercase_,self.min_value,self.max_value ) # Scale to [0, 1]. A__ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=(-1.0, 1.0),__lowerCamelCase=False ): A__ = input_range A__ = torch.clip(lowercase_,lowercase_,lowercase_ ) if clip else outputs # Scale to [0, 1]. A__ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): A__ = input_tokens > 0 A__ = self.notes_encoder( encoder_input_tokens=lowercase_,encoder_inputs_mask=lowercase_ ) A__ = self.continuous_encoder( encoder_inputs=lowercase_,encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): A__ = noise_time if not torch.is_tensor(lowercase_ ): A__ = torch.tensor([timesteps],dtype=torch.long,device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: A__ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A__ = timesteps * torch.ones(input_tokens.shape[0],dtype=timesteps.dtype,device=timesteps.device ) A__ = self.decoder( encodings_and_masks=lowercase_,decoder_input_tokens=lowercase_,decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = 100,__lowerCamelCase = True,__lowerCamelCase = "numpy",__lowerCamelCase = None,__lowerCamelCase = 1,): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_,lowercase_ ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(lowercase_ )}." ) A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims],dtype=np.floataa ) A__ = np.zeros([1, 0, self.n_dims],np.floataa ) A__ = torch.ones((1, TARGET_FEATURE_LENGTH),dtype=lowercase_,device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: A__ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device,dtype=self.decoder.dtype ) # The first chunk has no previous context. 
A__ = torch.zeros((1, TARGET_FEATURE_LENGTH),dtype=lowercase_,device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. A__ = ones A__ = self.scale_features( lowercase_,output_range=[-1.0, 1.0],clip=lowercase_ ) A__ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ),continuous_inputs=lowercase_,continuous_mask=lowercase_,) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop A__ = randn_tensor( shape=encoder_continuous_inputs.shape,generator=lowercase_,device=self.device,dtype=self.decoder.dtype,) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): A__ = self.decode( encodings_and_masks=lowercase_,input_tokens=lowercase_,noise_time=t / self.scheduler.config.num_train_timesteps,) # Compute previous output: x_t -> x_t-1 A__ = self.scheduler.step(lowercase_,lowercase_,lowercase_,generator=lowercase_ ).prev_sample A__ = self.scale_to_features(lowercase_,input_range=[-1.0, 1.0] ) A__ = mel[:1] A__ = mel.cpu().float().numpy() A__ = np.concatenate([full_pred_mel, pred_mel[:1]],axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_,lowercase_ ) logger.info('''Generated segment''',lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. 
Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": A__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: A__ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
193
def lowercase_ ( _lowerCamelCase : int): lowercase__ : Dict = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
87
0
# NOTE(review): machine-mangled CvT -> Transformers checkpoint-conversion
# script, kept byte-for-byte.  All five helper/driver functions were renamed
# ``A`` (each definition shadowing the previous) and the list locals were
# rebound to ``SCREAMING_SNAKE_CASE`` while their append/reference sites kept
# the original names (``embed``, ``attention_weights``, ``token``, ``head``,
# ``embeddings``, ``attention`` ...), so the text is not runnable as-is and a
# behavior-preserving rewrite of hundreds of rename-map f-strings is unsafe
# from this view — TODO confirm against transformers'
# ``models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py``.
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def A ( _lowercase ): SCREAMING_SNAKE_CASE : int = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( 
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( 
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( 
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def A ( _lowercase ): SCREAMING_SNAKE_CASE : 
Tuple = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def A ( ): SCREAMING_SNAKE_CASE : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def A ( _lowercase , _lowercase , _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : List[str] = 1_000 SCREAMING_SNAKE_CASE : Dict = "huggingface/label-files" SCREAMING_SNAKE_CASE : List[Any] = num_labels SCREAMING_SNAKE_CASE : Tuple = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) SCREAMING_SNAKE_CASE : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Any = idalabel SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[int] = CvtConfig(num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": SCREAMING_SNAKE_CASE : Any = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": SCREAMING_SNAKE_CASE : Tuple = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: SCREAMING_SNAKE_CASE : Union[str, Any] = [2, 2, 20] SCREAMING_SNAKE_CASE : Optional[Any] = [3, 12, 16] SCREAMING_SNAKE_CASE : Optional[Any] = [192, 768, 1_024] SCREAMING_SNAKE_CASE : Union[str, Any] = CvtForImageClassification(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : Dict = torch.load(_lowerCamelCase , map_location=torch.device('''cpu''' ) ) SCREAMING_SNAKE_CASE : Any = 
OrderedDict() SCREAMING_SNAKE_CASE : int = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: SCREAMING_SNAKE_CASE : Dict = list_of_state_dict + cls_token(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = list_of_state_dict + embeddings(_lowerCamelCase ) for cnt in range(config.depth[idx] ): SCREAMING_SNAKE_CASE : Any = list_of_state_dict + attention(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = list_of_state_dict + final() for gg in list_of_state_dict: print(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) ): SCREAMING_SNAKE_CASE : Dict = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __UpperCamelCase : List[str] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
182
from PIL import Image def lowercase_ ( _lowerCamelCase : Image , _lowerCamelCase : int): lowercase__ : List[str] = (259 * (level + 255)) / (255 * (259 - level)) def contrast(_lowerCamelCase : int) -> int: return int(128 + factor * (c - 128)) return img.point(_lowerCamelCase) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 UpperCamelCase = change_contrast(img, 170) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
87
0
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Union[str, Any] ) -> Dict: debug_launcher(test_script.main ) def __magic_name__( self :Any ) -> str: debug_launcher(test_ops.main )
9
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar UpperCamelCase = TypeVar('''T''') class snake_case_ ( Generic[T] ): __A : deque[T] # Cache store of keys __A : set[T] # References of the keys in cache __A : int = 10 # Maximum capacity of cache def __init__( self : Union[str, Any] , lowercase_ : int ) -> None: lowercase__ : int = deque() lowercase__ : str = set() if not n: lowercase__ : str = sys.maxsize elif n < 0: raise ValueError("n should be an integer greater than 0." ) else: lowercase__ : List[Any] = n def __UpperCamelCase ( self : Dict , lowercase_ : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowercase__ : Dict = self.dq_store.pop() self.key_reference.remove(lowercase_ ) else: self.dq_store.remove(lowercase_ ) self.dq_store.appendleft(lowercase_ ) self.key_reference.add(lowercase_ ) def __UpperCamelCase ( self : Dict ) -> None: for k in self.dq_store: print(lowercase_ ) def __repr__( self : Optional[int] ) -> str: return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = LRUCache(4) lru_cache.refer('''A''') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('''A''') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
0
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __UpperCamelCase : str = logging.get_logger(__name__) @add_end_docstrings( __A , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class SCREAMING_SNAKE_CASE ( __A ): """simple docstring""" def __lowerCAmelCase ( self : int ,lowercase_ : GenericTensor ): if self.framework == "tf": lowerCAmelCase__ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": lowerCAmelCase__ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=lowercase_ ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowerCAmelCase ( self : str ,lowercase_ : GenericTensor ): lowerCAmelCase__ : str = self.get_masked_index(lowercase_ ) lowerCAmelCase__ : Union[str, Any] = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' ,self.model.base_model_prefix ,F'No mask_token ({self.tokenizer.mask_token}) found on the input' ,) def __lowerCAmelCase ( self : Dict ,lowercase_ : GenericTensor ): if isinstance(lowercase_ ,lowercase_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(lowercase_ ) def __lowerCAmelCase ( self : List[str] 
,lowercase_ : Tuple ,lowercase_ : int=None ,**lowercase_ : Union[str, Any] ): if return_tensors is None: lowerCAmelCase__ : int = self.framework lowerCAmelCase__ : Dict = self.tokenizer(lowercase_ ,return_tensors=lowercase_ ) self.ensure_exactly_one_mask_token(lowercase_ ) return model_inputs def __lowerCAmelCase ( self : str ,lowercase_ : List[Any] ): lowerCAmelCase__ : Optional[int] = self.model(**lowercase_ ) lowerCAmelCase__ : Union[str, Any] = model_inputs["input_ids"] return model_outputs def __lowerCAmelCase ( self : int ,lowercase_ : Union[str, Any] ,lowercase_ : Dict=5 ,lowercase_ : List[str]=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: lowerCAmelCase__ : List[str] = target_ids.shape[0] lowerCAmelCase__ : Dict = model_outputs["input_ids"][0] lowerCAmelCase__ : Dict = model_outputs["logits"] if self.framework == "tf": lowerCAmelCase__ : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] lowerCAmelCase__ : Optional[Any] = outputs.numpy() lowerCAmelCase__ : int = outputs[0, masked_index, :] lowerCAmelCase__ : Optional[int] = stable_softmax(lowercase_ ,axis=-1 ) if target_ids is not None: lowerCAmelCase__ : str = tf.gather_nd(tf.squeeze(lowercase_ ,0 ) ,target_ids.reshape(-1 ,1 ) ) lowerCAmelCase__ : Dict = tf.expand_dims(lowercase_ ,0 ) lowerCAmelCase__ : int = tf.math.top_k(lowercase_ ,k=lowercase_ ) lowerCAmelCase__ : Any = topk.values.numpy(), topk.indices.numpy() else: lowerCAmelCase__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=lowercase_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample lowerCAmelCase__ : Dict = outputs[0, masked_index, :] lowerCAmelCase__ : Union[str, Any] = logits.softmax(dim=-1 ) if target_ids is not None: lowerCAmelCase__ : Optional[int] = probs[..., target_ids] lowerCAmelCase__ : str = probs.topk(lowercase_ ) lowerCAmelCase__ : Optional[int] = [] lowerCAmelCase__ : Tuple = values.shape[0] == 1 
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ): lowerCAmelCase__ : int = [] for v, p in zip(_values ,_predictions ): # Copy is important since we're going to modify this array in place lowerCAmelCase__ : str = input_ids.numpy().copy() if target_ids is not None: lowerCAmelCase__ : str = target_ids[p].tolist() lowerCAmelCase__ : str = p # Filter padding out: lowerCAmelCase__ : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back lowerCAmelCase__ : int = self.tokenizer.decode(lowercase_ ,skip_special_tokens=lowercase_ ) lowerCAmelCase__ : int = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(lowercase_ ) result.append(lowercase_ ) if single_mask: return result[0] return result def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[Any] ,lowercase_ : Optional[Any]=None ): if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : Tuple = [targets] try: lowerCAmelCase__ : Tuple = self.tokenizer.get_vocab() except Exception: lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = [] for target in targets: lowerCAmelCase__ : Optional[int] = vocab.get(lowercase_ ,lowercase_ ) if id_ is None: lowerCAmelCase__ : str = self.tokenizer( lowercase_ ,add_special_tokens=lowercase_ ,return_attention_mask=lowercase_ ,return_token_type_ids=lowercase_ ,max_length=1 ,truncation=lowercase_ ,)["input_ids"] if len(lowercase_ ) == 0: logger.warning( F'The specified target token `{target}` does not exist in the model vocabulary. ' '''We cannot replace it with anything meaningful, ignoring it''' ) continue lowerCAmelCase__ : Any = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. 
logger.warning( F'The specified target token `{target}` does not exist in the model vocabulary. ' F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' ) target_ids.append(id_ ) lowerCAmelCase__ : Optional[int] = list(set(lowercase_ ) ) if len(lowercase_ ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) lowerCAmelCase__ : Union[str, Any] = np.array(lowercase_ ) return target_ids def __lowerCAmelCase ( self : str ,lowercase_ : List[str]=None ,lowercase_ : str=None ): lowerCAmelCase__ : List[str] = {} if targets is not None: lowerCAmelCase__ : Tuple = self.get_target_ids(lowercase_ ,lowercase_ ) lowerCAmelCase__ : str = target_ids if top_k is not None: lowerCAmelCase__ : Optional[Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' ,self.model.base_model_prefix ,'''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self : Dict ,lowercase_ : str ,*lowercase_ : Optional[Any] ,**lowercase_ : Optional[int] ): lowerCAmelCase__ : List[Any] = super().__call__(lowercase_ ,**lowercase_ ) if isinstance(lowercase_ ,lowercase_ ) and len(lowercase_ ) == 1: return outputs[0] return outputs
106
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class snake_case_ ( __A ): __A : List[str] = "convbert" def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict: super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , ) lowercase__ : List[str] = vocab_size lowercase__ : Union[str, Any] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : int = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = max_position_embeddings lowercase__ : Optional[int] = type_vocab_size lowercase__ : Tuple = initializer_range lowercase__ : 
List[str] = layer_norm_eps lowercase__ : List[Any] = embedding_size lowercase__ : Optional[Any] = head_ratio lowercase__ : Dict = conv_kernel_size lowercase__ : Tuple = num_groups lowercase__ : Optional[int] = classifier_dropout class snake_case_ ( __A ): @property def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
87
0
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A ( __A , __A ): '''simple docstring''' @register_to_config def __init__(self : Any , *, _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 768 , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , ) -> Any: """simple docstring""" super().__init__() lowercase__ = nn.Parameter(torch.zeros(lowercase_ ) ) # parameters for additional clip time embeddings lowercase__ = nn.Linear(lowercase_ , lowercase_ ) lowercase__ = nn.Linear(lowercase_ , lowercase_ ) # parameters for encoder hidden states lowercase__ = clip_extra_context_tokens lowercase__ = nn.Linear( lowercase_ , self.clip_extra_context_tokens * cross_attention_dim ) lowercase__ = nn.Linear(lowercase_ , lowercase_ ) lowercase__ = nn.LayerNorm(lowercase_ ) def lowerCamelCase__ (self : Optional[Any] , *, _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> Any: """simple docstring""" if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowercase__ = image_embeddings.shape[0] lowercase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowercase__ = classifier_free_guidance_embeddings.expand( lowercase_ , -1 ) lowercase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowercase__ = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowercase__ = self.embedding_proj(lowercase_ ) lowercase__ = self.clip_image_embeddings_project_to_time_embeddings(lowercase_ ) lowercase__ = time_projected_image_embeddings + time_projected_prompt_embeds # ... 
and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowercase__ = self.clip_extra_context_tokens_proj(lowercase_ ) lowercase__ = clip_extra_context_tokens.reshape(lowercase_ , -1 , self.clip_extra_context_tokens ) lowercase__ = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowercase__ = self.encoder_hidden_states_proj(lowercase_ ) lowercase__ = self.text_encoder_hidden_states_norm(lowercase_ ) lowercase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
305
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict): # Initialise PyTorch model lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase) print(f'''Building PyTorch model from configuration: {config}''') lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase) # Load weights from tf checkpoint load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''') torch.save(model.state_dict() , _lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _lowercase : Union[str, Any] =logging.getLogger(__name__) _lowercase : List[str] ="pytorch_model.bin" @dataclasses.dataclass class snake_case__ : """simple docstring""" __lowerCAmelCase :str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) __lowerCAmelCase :Optional[str] = dataclasses.field( default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class snake_case__ : """simple docstring""" __lowerCAmelCase :str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) __lowerCAmelCase :str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) __lowerCAmelCase :Optional[str] = dataclasses.field( default=__A , metadata={"help": "A csv or a json file containing the validation data."} ) __lowerCAmelCase :Optional[str] = dataclasses.field( default=__A , metadata={"help": "The name of the task to train on."} , ) __lowerCAmelCase :Optional[List[str]] = dataclasses.field( default=__A , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class snake_case__ : """simple docstring""" __lowerCAmelCase :str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) __lowerCAmelCase :Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) __lowerCAmelCase :Optional[str] = dataclasses.field( default="no" , 
metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) __lowerCAmelCase :Optional[int] = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowerCAmelCase :Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." } , ) __lowerCAmelCase :Optional[bool] = dataclasses.field( default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) __lowerCAmelCase :Optional[bool] = dataclasses.field( default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) __lowerCAmelCase :Optional[bool] = dataclasses.field( default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) __lowerCAmelCase :Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) __lowerCAmelCase :Optional[int] = dataclasses.field( default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowerCAmelCase :Optional[int] = dataclasses.field( default=__A , metadata={"help": "Random seed for initialization."} , ) def lowerCAmelCase_ ( _lowercase : Tuple , _lowercase : List[str] , _lowercase : Tuple , _lowercase : str , _lowercase : Any , _lowercase : List[Any]) -> Dict: """simple docstring""" a__ : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1) if args.do_filter_by_confidence: a__ : Dict = dataset.filter(lambda _lowercase: example["probability"] > args.confidence_threshold) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 a__ : int = int(eval_result * len(_lowerCamelCase)) print(_lowerCamelCase) 
a__ : str = dataset.sort("""probability""" , reverse=_lowerCamelCase) a__ : str = dataset.select(range(_lowerCamelCase)) a__ : Optional[Any] = dataset.remove_columns(["""label""", """probability"""]) a__ : Any = dataset.rename_column("""prediction""" , """label""") a__ : Any = dataset.map(lambda _lowercase: {"label": idalabel[example["label"]]}) a__ : List[Any] = dataset.shuffle(seed=args.seed) a__ : List[str] = os.path.join(_lowerCamelCase , F'''train_pseudo.{args.data_file_extension}''') if args.data_file_extension == "csv": dataset.to_csv(_lowerCamelCase , index=_lowerCamelCase) else: dataset.to_json(_lowerCamelCase) def lowerCAmelCase_ ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , **_lowercase : str) -> Optional[int]: """simple docstring""" a__ : List[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() a__ : Optional[int] = STModelArguments(model_name_or_path=_lowerCamelCase) a__ : Dict = STDataArguments(train_file=_lowerCamelCase , infer_file=_lowerCamelCase) a__ : List[str] = STTrainingArguments(output_dir=_lowerCamelCase) a__ : Union[str, Any] = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_lowerCamelCase).items(): setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) for key, value in kwargs.items(): if hasattr(_lowerCamelCase , _lowerCamelCase): setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Sanity checks a__ : Optional[int] = {} a__ : List[Any] = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None a__ : int = args.train_file a__ : Tuple = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None a__ : List[Any] = args.eval_file for key in data_files: a__ : str = data_files[key].split(""".""")[-1] assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: a__ : int = extension else: assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) logger.info("""Creating the initial data directory for self-training...""") a__ : Optional[int] = F'''{args.output_dir}/self-train_iter-{{}}'''.format a__ : str = data_dir_format(0) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_lowerCamelCase) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase) accelerator.wait_for_everyone() a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = 0 a__ : Tuple = False # Show the progress bar a__ : Optional[int] = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations)): a__ : List[str] = data_dir_format(_lowerCamelCase) assert os.path.exists(_lowerCamelCase) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 a__ : Dict = os.path.join(_lowerCamelCase , """stage-1""") a__ : List[str] = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "evaluation_strategy": args.evaluation_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_lowerCamelCase , _lowerCamelCase): arguments_dict.update({key: value}) a__ : Optional[Any] = os.path.join(_lowerCamelCase , """best-checkpoint""" , _lowerCamelCase) if os.path.exists(_lowerCamelCase): logger.info( 
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _lowerCamelCase , _lowerCamelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _lowerCamelCase) finetune(**_lowerCamelCase) accelerator.wait_for_everyone() assert os.path.exists(_lowerCamelCase) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _lowerCamelCase) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data a__ : Tuple = os.path.join(_lowerCamelCase , """best-checkpoint""") a__ : str = os.path.join(_lowerCamelCase , """stage-2""") # Update arguments_dict a__ : int = model_path a__ : List[str] = data_files["train"] a__ : List[str] = current_output_dir a__ : str = os.path.join(_lowerCamelCase , """best-checkpoint""" , _lowerCamelCase) if os.path.exists(_lowerCamelCase): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , _lowerCamelCase , _lowerCamelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _lowerCamelCase) finetune(**_lowerCamelCase) accelerator.wait_for_everyone() assert os.path.exists(_lowerCamelCase) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _lowerCamelCase) a__ : Any = iteration a__ : List[str] = data_dir_format(iteration + 1) a__ : Optional[Any] = AutoConfig.from_pretrained(os.path.join(_lowerCamelCase , """best-checkpoint""")) a__ : Optional[Any] = config.idalabel a__ : Optional[int] = os.path.join(_lowerCamelCase , """eval_results_best-checkpoint.json""") a__ : Dict = os.path.join(_lowerCamelCase , """test_results_best-checkpoint.json""") assert os.path.exists(_lowerCamelCase) with open(_lowerCamelCase , """r""") as f: a__ : Dict = float(json.load(_lowerCamelCase)[args.eval_metric]) a__ : Any = os.path.join(_lowerCamelCase , """infer_output_best-checkpoint.csv""") assert 
os.path.exists(_lowerCamelCase) # Loading the dataset from local csv or json files. a__ : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]})["data"] a__ : Tuple = load_dataset("""csv""" , data_files={"""data""": infer_output_file})["data"] if accelerator.is_main_process: os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase) shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F'''eval_results_iter-{iteration}.json''')) if os.path.exists(_lowerCamelCase): shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F'''test_results_iter-{iteration}.json''')) create_pseudo_labeled_data(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) accelerator.wait_for_everyone() a__ : Dict = os.path.join(_lowerCamelCase , F'''train_pseudo.{args.data_file_extension}''') if args.evaluation_strategy != IntervalStrategy.NO.value: a__ : Union[str, Any] = eval_result if best_iteration is None: a__ : Union[str, Any] = new_iteration a__ : List[Any] = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: a__ : Dict = new_iteration a__ : Any = new_eval_result a__ : str = 0 else: if new_eval_result == best_eval_result: a__ : str = new_iteration a__ : List[Any] = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: a__ : Dict = True progress_bar.update(1) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , _lowerCamelCase) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _lowerCamelCase) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_lowerCamelCase , F'''eval_results_iter-{iteration}.json''') , os.path.join(_lowerCamelCase , """eval_results_best-iteration.json""") , ) else: # Assume that the last iteration is the best 
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _lowerCamelCase) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_lowerCamelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''') , os.path.join(_lowerCamelCase , """eval_results_best-iteration.json""") , )
170
# Shared test utilities: environment-driven skip flags, dependency-gated
# decorators, offline-mode simulation, and async subprocess execution helpers.
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment.

    Returns `default` when the variable is unset; raises ValueError when it is
    set to something `strtobool` does not understand.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available (per `datasets.config`)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available (per `datasets.config`)."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available (per `datasets.config`)."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available (per `datasets.config`)."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy and the given model load."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless RUN_SLOW is enabled in the environment."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless RUN_LOCAL is enabled in the environment."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless RUN_PACKAGED is enabled in the environment."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless RUN_REMOTE is enabled in the environment."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every decorator in `decorators` to each `test*` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    # Raised by the offline simulator when a request has no timeout set.
    pass


class OfflineSimulationMode(Enum):
    # How `offline()` should simulate the absence of a network connection.
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate offline mode for the duration of the `with` block.

    `mode` selects the simulation strategy (see `OfflineSimulationMode`);
    `timeout` is the forced request timeout used in CONNECTION_TIMES_OUT mode.
    """
    online_request = requests.Session().request

    def timeout_request(self, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the `with` block inside a fresh temporary cwd, restoring it after."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow allocations grow across the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow allocations do not grow across the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    """Return True when the two generators produce the same next draws (non-destructive: works on deep copies)."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Turn HTTP 500/502 failures inside `func` into pytest xfails; re-raise anything else."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    # Result of an async subprocess run: return code plus captured output lines.
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Feed each line of `stream` to `callback` until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing its stdout/stderr while capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Run `cmd` to completion and return its `_RunOutput`.

    Raises RuntimeError when the command fails or produces no output at all.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return this pytest-xdist worker's numeric id (0 when not under xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique per xdist worker to avoid clashes."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
87
0
"""Tests for the MaMaaa (M2M100-style) seq2seq model: unit tests via a small
random config plus slow integration tests against the pretrained checkpoint."""
import copy
import tempfile
import unittest

from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder


def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a MaMaaa forward pass, defaulting every
    mask to all-visible (padding positions masked out, all heads kept)."""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the original maps "decoder_attention_mask" to the
        # *encoder* attention_mask; kept as-is to preserve behavior.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class MaMaaaModelTester:
    """Builds tiny random configs/inputs and reusable checks for the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # Force an EOS in the last position (reconstructed from the standard
        # tester pattern — the garbled source lost the assignment target).
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that decoding with cached past equals decoding from scratch."""
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that a saved/reloaded standalone encoder and decoder reproduce
        the full model's hidden states."""
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    """Wrap a python token list as a long tensor on the test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 10_24))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
80
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Return (hf_name, original_name) rename pairs for the patch embedding of stage `idx`."""
    hf = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig = f"stage{idx}.patch_embed"
    return [
        (f"{hf}.projection.weight", f"{orig}.proj.weight"),
        (f"{hf}.projection.bias", f"{orig}.proj.bias"),
        (f"{hf}.normalization.weight", f"{orig}.norm.weight"),
        (f"{hf}.normalization.bias", f"{orig}.norm.bias"),
    ]


def attention(idx, cnt):
    """Return (hf_name, original_name) rename pairs for transformer block `cnt` of stage `idx`.

    Order matters only for the printed log; the pairs are ultimately consumed into a dict.
    The order below matches the original hand-written list: conv projections (q, k, v),
    linear projections (q, k, v), attention output, MLP, layer norms.
    """
    hf = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig = f"stage{idx}.blocks.{cnt}"
    weights = []
    # Convolutional (conv + batch-norm) projections for query/key/value.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{hf}.attention.attention.convolution_projection_{name}.convolution_projection"
        conv_orig = f"{orig}.attn.conv_proj_{short}"
        weights.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            weights.append((f"{conv_hf}.normalization.{stat}", f"{conv_orig}.bn.{stat}"))
    # Linear projections for query/key/value.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for p in ("weight", "bias"):
            weights.append(
                (f"{hf}.attention.attention.projection_{name}.{p}", f"{orig}.attn.proj_{short}.{p}")
            )
    # Attention output projection, MLP (fc1/fc2) and the two layer norms.
    for hf_mod, orig_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for p in ("weight", "bias"):
            weights.append((f"{hf}.{hf_mod}.{p}", f"{orig}.{orig_mod}.{p}"))
    return weights


def cls_token(idx):
    """Return the rename pair for the CLS token of stage `idx` (only the last stage has one)."""
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Return rename pairs for the final layer norm and the classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint to the Hugging Face format.

    Args:
        cvt_model: Name of the CvT variant (e.g. ``cvt-13``, ``cvt-21``, ``cvt-w24``);
            the depth configuration is inferred from characters 4:6 of the name.
        image_size: Input image size stored on the image processor.
        cvt_file_name: Path to the original ``.pth`` state dict.
        pytorch_dump_folder: Output directory for the converted model and processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet), depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # Record the requested input resolution on the processor.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    # Build the full (hf_name, original_name) mapping, then materialize the new state dict.
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def a_ ( SCREAMING_SNAKE_CASE__ : bool = True , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' if not is_tqdm_available(): raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' ) _lowerCamelCase : Dict =False if main_process_only: _lowerCamelCase : int =PartialState().local_process_index == 0 return _tqdm(*_lowerCamelCase , **_lowerCamelCase , disable=_lowerCamelCase )
199
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy import structure: maps submodule name -> public names it exports.
# Optional-dependency branches *extend* this dict; they must never replace it.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers / type checkers get the real imports.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""Tests for the Flax BlenderbotSmall model."""
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU.
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory
    # that is no longer needed (but will be slower, as stated here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html).
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder/decoder input dict, deriving any mask not supplied."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder attention mask is reused as the decoder mask here,
        # mirroring the original code — confirm this is intentional before changing it.
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotSmallModelTester:
    """Builds tiny configs/inputs and shared checks for the Flax BlenderbotSmall tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Token ids start at 3 so the reserved bos/eos/pad ids never appear mid-sequence;
        # every row then ends with the eos token (2).
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # Caching is exercised explicitly by the check_use_cache_* helpers below.
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a cache must match a single full decode."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward, but with an explicit padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask out to the full cache length with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase):
    """Shape/behavior checks for the conditional-generation head on tiny configs."""

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # Encoder and decoder sequence lengths differ; logits follow the decoder length.
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token per batch...
        self.assertEqual(n_pad_after, n_pad_before - 1)
        # ...and every row now starts with the decoder start token.
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # Encoding must produce identical shapes jitted and unjitted.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # Decoding must produce identical shapes jitted and unjitted.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
33
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LED, covering both the slow and the fast tokenizer.

    Fixes over the previous revision: the mixin base class and the attributes it
    reads (``tokenizer_class``, ``rust_tokenizer_class``, ``test_rust_tokenizer``)
    were lost to name mangling, and every method shared the name
    ``__UpperCamelCase`` so later definitions clobbered earlier ones and
    ``self.default_tokenizer`` / ``self.default_tokenizer_fast`` (referenced in
    the test bodies) did not exist. Names are restored so the tests run.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the temp dir for the local tokenizer."""
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer built from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Return a fast tokenizer built from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # Input text and the text we expect back after decode — identical here.
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        # Without a target text, no decoder-side keys should appear in the batch.
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        # A very long input must be truncated to the model max length (5122 here).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"],
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both encoder input and labels are wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            # One mask entry per input id; pad() must extend the mask with -1.
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
0
"""Convert slow (Python) tokenizer checkpoints to their fast (Rust) counterparts.

Fixes over the previous revision: the function was defined under an obfuscated
name while ``__main__`` called ``convert_slow_checkpoint_to_fast`` (NameError);
all four parameters shared one name (SyntaxError); and the module globals the
body references (``TOKENIZER_CLASSES``, ``logger``, ``parser``, ``args``) had
been renamed away. Names are restored so the script runs.
"""
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map each convertible slow tokenizer name to its fast class, e.g. "Bert" -> BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download the given checkpoint(s), convert them, and save only the fast files.

    Args:
        tokenizer_name: Key into ``TOKENIZER_CLASSES``; ``None`` converts every class.
        checkpoint_name: Single checkpoint to convert; ``None`` converts all canonical
            checkpoints known to the tokenizer class.
        dump_path: Output directory for the generated fast tokenizer files.
        force_download: Re-download checkpoints even if cached.

    Raises:
        ValueError: If ``tokenizer_name`` is given but not a known convertible class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab file lives in a checkpoint sub-directory: mirror that layout.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast serialization; drop the slow-format files just written.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
276
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor


if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of mel frames produced per generated segment.
TARGET_FEATURE_LENGTH = 256


# NOTE(review): class name kept as-is for compatibility with external importers;
# upstream calls this SpectrogramDiffusionPipeline.
class snake_case_(DiffusionPipeline):
    """Pipeline that renders MIDI-like note tokens into audio via spectrogram diffusion.

    Fixes over the previous revision: ``logger``/``TARGET_FEATURE_LENGTH`` had been
    collapsed into one clobbered global although both are referenced; tuple unpacks
    such as ``min_out, max_out = output_range`` had both targets renamed to one
    variable (with an illegal annotation) while the original names were still read
    below; and all methods shared one name although ``__call__`` invokes
    ``self.scale_features`` / ``self.encode`` / ``self.decode`` /
    ``self.scale_to_features``. Names are restored so the pipeline runs.
    """

    _optional_components = ["melgan"]  # presumably read by DiffusionPipeline machinery — confirm upstream

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map mel features from [min_value, max_value] into output_range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert scale_features: map values in input_range back to mel range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run the note and continuous encoders; zero tokens are treated as padding."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Run one denoising step of the decoder at the given (scalar or tensor) time."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio segment-by-segment, feeding each predicted mel as context for the next."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
87
0
"""Sort the `_import_structure` blocks of transformers `__init__.py` files in place.

Fixes over the previous revision: every function was defined under one shared
obfuscated name while call sites used the real names (NameError); several
signatures had duplicate parameter names (SyntaxError); `noop` and the closures
in `ignore_underscore` / the sort lambdas referenced undefined variables; and
the module regex constants had been renamed away from the `_re_*` names the
code reads. Names and signatures are restored so the script runs.
"""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` (empty string for blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks delimited by lines at exactly `indent_level`.

    If `start_prompt`/`end_prompt` are given, everything before/after them is
    kept as a single untouched block at the start/end of the returned list.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the target indent closes the current block if the block's
            # last line was more deeply indented (i.e. this line is a new opener).
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the object names inside its brackets sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one `__init__.py`.

    Returns True when `check_only` is set and the file would change; otherwise
    rewrites the file in place when needed.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Accumulate every failing file (a plain assignment would keep only the last one).
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
309
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    """Tests for the zero-shot audio classification pipeline.

    Fixes over the previous revision: all four test methods shared the name
    ``__UpperCamelCase``, so later definitions clobbered earlier ones and none
    were discovered by the test runner. Restored to ``test_*`` names.
    """

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
87
0
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single
    processor. The image processor (optionally) runs OCR to extract words and
    normalized bounding boxes; the tokenizer turns those into model inputs.

    NOTE(review): class/base/attribute names reconstructed — the mangled original
    had duplicate parameter names (a SyntaxError) and an undefined base class.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was the pre-v5 name.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images (and optionally text/boxes/labels) for the model.

        When the image processor was created with `apply_ocr=True`, words and
        boxes come from OCR and must not be supplied by the caller.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
193
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for Open-Llama models.

    NOTE(review): parameter names reconstructed from the assignment bodies —
    the mangled original declared every `__init__` parameter under one name
    (a SyntaxError) and called an undefined validation method.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key is kept deliberately: configs in the wild
        # may still carry "use_memorry_efficient_attention".
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration, raising ValueError on bad input."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
182
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation.

    NOTE(review): class/function names reconstructed — in the mangled original
    every class shared the name `snake_case_`, so later definitions shadowed
    earlier ones and only one criterion class actually existed.
    """

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop once `input_ids` reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop after generating `max_new_tokens` past `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop once generation has run longer than `max_time` seconds."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; stops when any of its members says to stop."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        # First length-based criterion wins; None if there is none.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    Warns (without overriding) when the criteria already carry a different
    length limit.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
87
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : List[Any] ={ 'configuration_blip_2': [ 'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig', ], 'processing_blip_2': ['Blip2Processor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int =[ 'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Blip2Model', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2VisionModel', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys __lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37).

    Adds atom14<->atom37 index maps and existence masks to `protein`, keyed by
    the residue types in `protein["aatype"]`.

    NOTE(review): identifiers reconstructed — the mangled original assigned
    every local to one name and called an undefined helper, so it could not run.
    """
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy wrapper: convert ndarray inputs to tensors, build the masks, and
    return the result converted back to ndarrays."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
87
0
"""simple docstring""" __UpperCamelCase : Optional[int] = [0, 2, 4, 6, 8] __UpperCamelCase : Optional[int] = [1, 3, 5, 7, 9] def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowerCAmelCase__ : str = 0 for digit in range(10 ): lowerCAmelCase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase ) return result lowerCAmelCase__ : Dict = 0 for digita in range(10 ): lowerCAmelCase__ : int = digita if (remainder + digita) % 2 == 0: lowerCAmelCase__ : Optional[Any] = ODD_DIGITS else: lowerCAmelCase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowerCAmelCase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def __SCREAMING_SNAKE_CASE ( A_ = 9 ): lowerCAmelCase__ : Tuple = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase ) return result if __name__ == "__main__": print(F'''{solution() = }''')
106
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds tiny BigBird configs/inputs for the Flax model tests.

    NOTE(review): parameter and attribute names reconstructed from the mangled
    original, whose `__init__` declared every parameter under one name (a
    SyntaxError) and never assigned to `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        # is_decoder=False — TODO confirm: the mangled source passed an
        # undefined name here.
        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    # NOTE(review): these two flags were anonymous `False` assignments in the
    # mangled source; names assumed from the common Flax test mixin — verify.
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
87
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def UpperCamelCase ( __magic_name__ : int ) -> Dict: """simple docstring""" monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() ) @pytest.fixture def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> str: """simple docstring""" class A : '''simple docstring''' def __init__(self : int , _UpperCAmelCase : Dict ) -> Tuple: """simple docstring""" lowercase__ = metric_id class A : '''simple docstring''' A__ = [MetricMock(__A ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def lowerCamelCase__ (self : Tuple ) -> Tuple: """simple docstring""" return self._metrics monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() ) @pytest.mark.parametrize( """func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] ) def UpperCamelCase ( __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" if "tmp_path" in args: lowercase__ = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args ) with pytest.warns(_lowerCamelCase , match="""https://huggingface.co/docs/evaluate""" ): func(*_lowerCamelCase )
305
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
0