"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment variable `key`, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.0: 'pip install \"soundfile>=0.12.0\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Decorator marking a test that requires regex."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Decorator marking a test that requires elasticsearch."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Decorator marking a test that requires sqlalchemy."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Decorator marking a test that requires PyTorch."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Decorator marking a test that requires TensorFlow."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Decorator marking a test that requires JAX."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Decorator marking a test that requires Pillow."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Decorator marking a test that requires transformers."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Decorator marking a test that requires tiktoken."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Decorator marking a test that requires spacy."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator marking a test that requires a specific spacy model."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Decorator marking a test that requires pyspark."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Decorator marking a test that requires joblibspark."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Decorator marking a test as slow; enable with RUN_SLOW=1."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Decorator marking a test as local; disable with RUN_LOCAL=0."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Decorator marking a test as packaged; disable with RUN_PACKAGED=0."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Decorator marking a test that requires remote access; enable with RUN_REMOTE=1."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
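# A minimal usage sketch (the test class below is hypothetical, not part of
# this module): the decorators above stack, so a test runs only when its
# dependency is installed and the matching RUN_* flag allows it.
#
# class FaissIndexTest(unittest.TestCase):
#     @require_faiss
#     @slow
#     def test_build_index(self):
#         import faiss
#
#         index = faiss.IndexFlatL2(8)
#         self.assertEqual(index.d, 8)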
def for_all_test_methods(*decorators):
    """Apply `decorators` to every test method of a test class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for requests made inside the context."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Check whether two numpy random generators are in the same state."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """Mark a test as an expected failure when the server answers with a 500 or 502 HTTP error."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker (0 if `pytest-xdist` isn't used)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a master port unique to the current `pytest-xdist` worker, to avoid clashes in concurrent tests."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
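# A minimal sketch of wiring the helpers above together for a distributed test
# (the test script path is a placeholder):
#
# cmd = [
#     sys.executable,
#     "-m",
#     "torch.distributed.launch",
#     f"--master_port={get_torch_dist_unique_port()}",
#     "tests/distributed/my_test_script.py",
# ]
# execute_subprocess_async(cmd, env=os.environ.copy())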
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _a ( _snake_case = "mumbai" ):
"""simple docstring"""
UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
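# A minimal sketch (not the library implementation) of the alignment rule the
# first test above exercises: a missing side is filled in from the other, and
# both default to the last stage.
def _sketch_get_aligned(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_indices is None:
        return out_features, [stage_names.index(name) for name in out_features]
    if out_features is None:
        return [stage_names[idx] for idx in out_indices], out_indices
    return out_features, out_indices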
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    Wraps a learning rate scheduler so that it only steps when the wrapped optimizer(s) actually stepped,
    and steps once per process when the dataloader batch size was multiplied by the number of processes.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
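# A minimal usage sketch, assuming a standard torch optimizer/scheduler pair;
# in practice `Accelerator.prepare` builds this wrapper for you:
#
# import torch
#
# model = torch.nn.Linear(4, 4)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# scheduler = AcceleratedScheduler(lr_scheduler, optimizer)
# scheduler.step()  # only advances when the wrapped optimizer actually stepped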
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
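# A quick demo of the queue above (names are the ones defined in this file):
if __name__ == "__main__":
    queue = CircularQueueLinkedList(3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())  # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b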
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_decoded_1 = tokenizer.decode(x_token_1)
        x_decoded_2 = tokenizer.decode(x_token_2)
        x_decoded_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_decoded_1, expected_text)
        self.assertEqual(x_decoded_2, expected_text)
        self.assertEqual(x_decoded_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, input_ids_expected)
        self.assertListEqual(x_token.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token.attention_mask, attention_mask_expected)
        self.assertListEqual(x_token_2.input_ids, input_ids_expected)
        self.assertListEqual(x_token_2.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token_2.attention_mask, attention_mask_expected)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy the original HiFi-GAN generator weights into the Transformers model."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Build a `SpeechT5HifiGan` model, load the original weights and stats into it, and save/push it."""
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
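# Example invocation (all paths are placeholders):
#
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/generator_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan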
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
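# A minimal usage sketch of the helpers above:
#
# start_measures = start_measure()
# run_workload()  # placeholder for the code being profiled
# measures = end_measure(start_measures)
# log_measures(measures, "my workload")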
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Keep the attributes of the deprecated modules pointing at the current objects
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
    specify them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates a boolean mask per image: 1 marks a model patch that is masked out."""
    def __init__( self ,input_size=192 ,mask_patch_size=32 ,model_patch_size=4 ,mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self ):
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count ,dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
        return torch.tensor(mask.flatten() )
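# --- Editor's illustration (not part of the original script) ---
# A quick sanity check of the mask arithmetic above, using the class defaults.
# With input_size=192 and mask_patch_size=32 the coarse mask grid is 6x6 = 36
# cells; mask_ratio=0.6 masks ceil(36 * 0.6) = 22 of them, and each masked cell
# is repeated scale = 32 // 4 = 8 times along both axes, so the flattened mask
# has one entry per model patch:
#
#     generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     mask = generator()
#     assert mask.shape == ((192 // 4) ** 2,)   # 48 * 48 model patches
#     assert int(mask.sum()) == 22 * 8 ** 2     # 22 coarse cells, each 8x8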
def collate_fn(examples ):
    """simple docstring"""
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , """decoder_type""" ):
        config.decoder_type = """simmim"""
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
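# Example invocation (editor's sketch; the model type, dataset and output path
# below are illustrative placeholders, not taken from the original script):
#
#     python run_mim.py \
#         --model_type vit \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-outputs \
#         --do_train \
#         --do_eval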
| 74 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase__ ( __a , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''}
SCREAMING_SNAKE_CASE = False
# No `output_type`.
SCREAMING_SNAKE_CASE = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components( self ):
torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """video""": video,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
def _UpperCamelCase ( self ):
UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**a_ )
UpperCAmelCase = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
UpperCAmelCase = self.get_dummy_inputs(a_ )
UpperCAmelCase = """np"""
UpperCAmelCase = sd_pipe(**a_ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _UpperCamelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False ,expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
        pipe = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" ,torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1_024, 576) ,generator=generator )
        video = video.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        video_frames = pipe(prompt ,video=video ,generator=generator ,num_inference_steps=3 ,output_type="""pt""" ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 706 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def configure_logger(model_args , training_args ):
    """simple docstring"""
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
SCREAMING_SNAKE_CASE = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
SCREAMING_SNAKE_CASE = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self ,features ):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
        batch_size = batch["""input_values"""].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] ,device=attention_mask.device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["""mask_time_indices"""] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=attention_mask ,min_masks=2 ,)
        return batch
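# Editor's note: an illustrative standalone version of the convolution formula
# that `_get_feat_extract_output_lengths` applies above. The kernel sizes and
# strides below are the Wav2Vec2 defaults and are an assumption here, not read
# from the model config.
def _illustrative_feat_extract_output_length(
    input_length, kernel_sizes=(10, 3, 3, 3, 3, 2, 2), strides=(5, 2, 2, 2, 2, 2, 2)
):
    # each 1D conv layer shrinks the sequence: out = (in - kernel) // stride + 1
    for kernel, stride in zip(kernel_sizes, strides):
        input_length = (input_length - kernel) // stride + 1
    return input_length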
class Wav2Vec2PreTrainer( Trainer ):
    def __init__( self ,*args ,max_gumbel_temp=1 ,min_gumbel_temp=0 ,gumbel_temp_decay=1.0 ,**kwargs ):
        super().__init__(*args ,**kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self ,model ,inputs ):
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model ,inputs )
        else:
            loss = self.compute_loss(model ,inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["""mask_time_indices"""]).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss ,self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        return loss.detach()
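# Editor's note: the gumbel temperature schedule applied in training_step above,
# written out as a standalone sketch. The defaults mirror the ModelArguments in
# this file (max 2.0, min 0.5, decay 0.999995): the temperature decays
# geometrically with the update step and is floored at the minimum.
def _illustrative_gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)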
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["""validation"""] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        datasets["""train"""] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["""validation"""] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
        datasets["""train"""] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["""speech"""] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
            """ ``config.feat_extract_norm='layer'""" )
    model = Wav2Vec2ForPreTraining(config )
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model , feature_extractor=feature_extractor )
    trainer = Wav2Vec2PreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a , b ):
    """
    Multiplication only for 2x2 matrices
    """
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("""Matrices are not 2x2""" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a , matrix_b ):
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a , matrix_b ):
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a ):
    """
    Given an even-length matrix, returns the top_left, top_right, bot_left, bot_right quadrants.
    """
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix ):
    """simple docstring"""
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix ):
    """simple docstring"""
    print("""\n""".join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a , matrix_b ):
    """
    Recursive function to multiply two square matrices whose size is a power of 2,
    using Strassen's seven-product scheme.
    """
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
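# For reference (editor's addition), with A = [[a, b], [c, d]] and
# B = [[e, f], [g, h]] the seven products above are Strassen's standard ones:
#
#     t1 = a(f - h)          t2 = (a + b)h        t3 = (c + d)e
#     t4 = d(g - e)          t5 = (a + d)(e + h)
#     t6 = (b - d)(g + h)    t7 = (a - c)(e + f)
#
#     top_left  = t5 + t4 - t2 + t6      top_right = t1 + t2
#     bot_left  = t3 + t4                bot_right = t1 + t5 - t3 - t7
#
# Seven recursive multiplications instead of eight give O(n^log2(7)) ~ O(n^2.81).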
def strassen(matrix1 , matrix2 ):
    """simple docstring"""
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            F'''Matrix A: {matrix1}\n'''
            F'''Matrix B: {matrix2}'''
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 707 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__( self ,parent ,):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs( self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed( self ):
        random.seed(self.seed )
        tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model( self ,config ,input_ids_1 ,input_ids_2 ,lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {"""input_ids""": input_ids_2, """mems""": mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
    def create_and_check_transfo_xl_lm_head( self ,config ,input_ids_1 ,input_ids_2 ,lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {"""input_ids""": input_ids_1, """labels""": lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {"""input_ids""": input_ids_2, """mems""": mems_1, """labels""": lm_labels}
        _ , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertEqual(lm_logits_2.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
    def create_and_check_transfo_xl_for_sequence_classification( self ,config ,input_ids_1 ,input_ids_2 ,lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids_1}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=TransfoXLConfig ,d_embed=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def _UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def _UpperCamelCase ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x ,tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _UpperCamelCase ( self ):
pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _UpperCamelCase ( self ):
        model = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 74 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_UpperCamelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    """BuilderConfig for Spark."""
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df , partition_order ,):
"""simple docstring"""
import pyspark
def generate_fn():
        df_with_partition_id = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self ,df ,partition_order=None ,):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df ,self.partition_order )
    def __iter__( self ):
        yield from self.generate_examples_fn()
    def shuffle_data_sources( self ,generator ):
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df ,partition_order=partition_order )
    def shard_data_sources( self ,worker_id ,num_workers ):
        partition_order = self.split_shard_indices_by_worker(worker_id ,num_workers )
        return SparkExamplesIterable(self.df ,partition_order=partition_order )
    @property
    def n_shards( self ):
        return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self ,df ,cache_dir = None ,working_dir = None ,**config_kwargs ,):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir ,config_name=str(self.df.semanticHash() ) ,**config_kwargs ,)
    def _validate_cache_dir( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
            os.makedirs(self._cache_dir ,exist_ok=True )
            probe_file = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file ,"""a""" )
            return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self ,dl_manager ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self ,max_shard_size ):
import pyspark
def get_arrow_batch_size(A ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size ,"""batch_bytes: long""" )
            .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows ,int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self ,fpath ,file_format ,max_shard_size ,):
        import pyspark
        writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
        working_fpath = os.path.join(self._working_dir ,os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == """parquet"""
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it ,None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
            shard_id = 0
            writer = writer_class(
                features=features ,path=working_fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,writer_batch_size=writer_batch_size ,storage_options=storage_options ,embed_local_files=embed_local_files ,)
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features ,path=working_fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,writer_batch_size=writer_batch_size ,storage_options=storage_options ,embed_local_files=embed_local_files ,)
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) ,os.path.basename(file ) )
                    shutil.move(file ,dest )
        stats = (
self.df.mapInArrow(lowercase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self ,A ,A = "arrow" ,A = None ,A = None ,**A ,):
self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
        fname = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir ,fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath ,file_format ,max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(F'''Renaming {total_shards} shards.''' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id ,shard_id ,global_shard_id ,):
                rename(
                    fs ,fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,fpath.replace("""TTTTT-SSSSS""" ,F'''{global_shard_id:05d}''' ).replace("""NNNNN""" ,F'''{total_shards:05d}''' ) ,)
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args ,len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,fpath.replace(SUFFIX ,"""""" ) ,)
    def _get_examples_iterable_for_split( self ,split_generator ,):
return SparkExamplesIterable(self.df )
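# Editor's usage sketch (assumes a live SparkSession; `Dataset.from_spark` is
# the public entry point that ultimately drives the builder above):
#
#     from datasets import Dataset
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = Dataset.from_spark(df)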
| 708 |
"""simple docstring"""
from math import sqrt
def solution(limit = 100_0000 ):
    """simple docstring"""
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides: int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
return max_cuboid_size
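# Editor's note on the counting above: for a cuboid with sides a <= b <= c the
# shortest surface path is sqrt(c**2 + (a + b)**2); e.g. a 3x5x6 cuboid gives
# sqrt(6**2 + (3 + 5)**2) = 10. Fixing c = max_cuboid_size and
# s = sum_shortest_sides = a + b, the number of valid (a, b) pairs with
# 1 <= a <= b <= c is min(c, s // 2) - max(1, s - c) + 1, which is exactly the
# increment applied whenever sqrt(s**2 + c**2) is an integer.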
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def _UpperCamelCase ( self ):
debug_launcher(self.test_metrics.main ,num_processes=1 )
@require_cpu
def _UpperCamelCase ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _UpperCamelCase ( self ):
self.test_metrics.main()
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ ,env=os.environ.copy() )
| 709 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path , enable_fusion=False ):
    """simple docstring"""
    model , model_cfg = create_model(
        """HTSAT-tiny""" , """roberta""" , checkpoint_path , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=enable_fusion , fusion_type="""aff_2d""" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
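# Editor's illustration of the index arithmetic above (the key below is made up
# for the example): a checkpoint key such as "...projection.sequential.3.weight"
# maps to "...projection.layers.1.linear.weight", on the assumption that each HF
# layer corresponds to three consecutive modules in the original nn.Sequential
# (hence the integer division by 3).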
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """simple docstring"""
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 0 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = """"""
UpperCAmelCase = """"""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 256
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = cva.imread(__lowerCAmelCase ,0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() ,256 ,[0, 256] ,label="""x""" )
UpperCAmelCase = np.sum(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
                UpperCAmelCase = last % 1  # fractional part of `last`; the original `int(last % last )` was always 0
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__lowerCAmelCase )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" ,self.img )
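    # Hedged worked example of the equalization mapping above: for a 4-pixel
    # image with gray levels [0, 0, 1, 3] and L = 4, the cumulative
    # probabilities sk are 0.5, 0.75, 0.75 and 1.0, so the remapped levels
    # (L - 1) * sk, rounded half-up, are 2, 2, 2 and 3 respectively.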
def _UpperCamelCase ( self ):
plt.hist(self.img.ravel() ,256 ,[0, 256] )
def _UpperCamelCase ( self ):
cva.imshow("""Output-Image""" ,self.img )
cva.imshow("""Input-Image""" ,self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _UpperCamelCase = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 710 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
| 74 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = os.path.abspath(_snake_case )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
UpperCAmelCase = tf.train.list_variables(_snake_case )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCAmelCase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCAmelCase = name[1:]
# figure out how many levels deep the name is
UpperCAmelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_snake_case )
# read data
UpperCAmelCase = tf.train.load_variable(_snake_case , _snake_case )
names.append("""/""".join(_snake_case ) )
arrays.append(_snake_case )
logger.info(F'''Read a total of {len(_snake_case ):,} layers''' )
# Sanity check
if len(set(_snake_case ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(_snake_case ) )})''' )
UpperCAmelCase = list(set(_snake_case ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_snake_case , _snake_case ):
UpperCAmelCase = full_name.split("""/""" )
UpperCAmelCase = model
UpperCAmelCase = []
for i, m_name in enumerate(_snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
UpperCAmelCase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """embeddings""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
UpperCAmelCase = getattr(_snake_case , """encoder""" )
UpperCAmelCase = getattr(_snake_case , """layer""" )
UpperCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """pooler""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
UpperCAmelCase = getattr(_snake_case , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
UpperCAmelCase = getattr(_snake_case , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
UpperCAmelCase = getattr(_snake_case , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
UpperCAmelCase = getattr(_snake_case , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
UpperCAmelCase = getattr(_snake_case , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """intermediate""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
UpperCAmelCase = getattr(_snake_case , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
UpperCAmelCase = getattr(_snake_case , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
UpperCAmelCase = """.""".join(_snake_case )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _snake_case ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , _snake_case ):
UpperCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase = array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase = torch.from_numpy(_snake_case )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
logger.info(F'''Loading model based on config from {config_path}...''' )
UpperCAmelCase = BertConfig.from_json_file(_snake_case )
UpperCAmelCase = BertModel(_snake_case )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
_UpperCamelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 711 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase__ :
def __init__( self ,A ,A ):
if len(A ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
UpperCAmelCase = list(A )
UpperCAmelCase = degree
def __add__( self ,A ):
if self.degree > polynomial_a.degree:
UpperCAmelCase = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree ,coefficients )
else:
UpperCAmelCase = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree ,coefficients )
def __sub__( self ,A ):
return self + polynomial_a * Polynomial(0 ,[-1] )
def __neg__( self ):
return Polynomial(self.degree ,[-c for c in self.coefficients] )
def __mul__( self ,A ):
UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
        return Polynomial(self.degree + polynomial_a.degree ,coefficients )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self ):
UpperCAmelCase = """"""
for i in range(self.degree ,-1 ,-1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
return polynomial
def __repr__( self ):
return self.__str__()
def _UpperCamelCase ( self ):
UpperCAmelCase = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 ,A )
def _UpperCamelCase ( self ,A = 0 ):
UpperCAmelCase = [0] * (self.degree + 2)
UpperCAmelCase = constant
for i in range(self.degree + 1 ):
UpperCAmelCase = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 ,A )
def __eq__( self ,A ):
        if not isinstance(polynomial_a ,Polynomial ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self ,A ):
return not self.__eq__(A )
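# A minimal usage sketch for the polynomial class above (hedged: the class is
# bound to the obfuscated name `lowerCamelCase__`, though its own methods
# construct results as `Polynomial`). Coefficients are stored lowest degree
# first:
#     >>> p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
#     >>> q = Polynomial(1, [0, 2])      # 2x
#     >>> print(p + q)
#     3x^2 + 2x + 1
#     >>> print(p * q)
#     6x^3 + 2x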
| 74 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class lowerCamelCase__ ( UpperCamelCase_ ):
SCREAMING_SNAKE_CASE = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE = Features({'''audio''': Audio()} )
SCREAMING_SNAKE_CASE = Features({'''transcription''': Value('''string''' )} )
SCREAMING_SNAKE_CASE = '''audio'''
SCREAMING_SNAKE_CASE = '''transcription'''
def _UpperCamelCase ( self ,A ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,UpperCamelCase__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.input_schema.copy()
UpperCAmelCase = features[self.audio_column]
UpperCAmelCase = input_schema
return task_template
@property
def _UpperCamelCase ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 712 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = 2
UpperCAmelCase = 99
UpperCAmelCase = 0
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 512
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = """last"""
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = 0
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertModel(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertWithLMHeadModel(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForSequenceClassification(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFFlaubertForTokenClassification(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFFlaubertForMultipleChoice(config=A )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
        (
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
        ) = config_and_inputs
UpperCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFFlaubertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCAmelCase = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
UpperCAmelCase = model(A )[0]
UpperCAmelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,A )
# compare the actual values for a slice.
UpperCAmelCase = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case , _snake_case , _snake_case ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            UpperCAmelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
if "norm" in key:
UpperCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            UpperCAmelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
if "layer_norm1" in key:
UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )]
            UpperCAmelCase = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
if "attn.q" in key:
UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            UpperCAmelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
if "bot_conv" in key:
UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase = value
return new_state_dict
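# Hedged trace of the renaming above on two representative checkpoint keys:
#     "module.encoder.patch_embed1.proj.weight"
#         -> "glpn.encoder.patch_embeddings.0.proj.weight"
#     "module.encoder.block1.0.attn.q.weight"
#         -> "glpn.encoder.block.0.0.attention.self.query.weight"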
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase = kv_bias[config.hidden_sizes[i] :]
def _a ( ):
"""simple docstring"""
UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case=False , _snake_case=None ):
"""simple docstring"""
UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase = GLPNImageProcessor()
# prepare image
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_snake_case , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
UpperCAmelCase = torch.load(_snake_case , map_location=torch.device("""cpu""" ) )
# rename keys
UpperCAmelCase = rename_keys(_snake_case )
# key and value matrices need special treatment
read_in_k_v(_snake_case , _snake_case )
# create HuggingFace model and load state dict
UpperCAmelCase = GLPNForDepthEstimation(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# forward pass
UpperCAmelCase = model(_snake_case )
UpperCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCAmelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
UpperCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_snake_case , )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_UpperCamelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 74 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCamelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_UpperCamelCase = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
_UpperCamelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class lowerCamelCase__ ( UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = LxmertTokenizer
def __init__( self ,A=None ,A=None ,A=True ,A="[UNK]" ,A="[SEP]" ,A="[PAD]" ,A="[CLS]" ,A="[MASK]" ,A=True ,A=None ,**A ,):
super().__init__(
A ,tokenizer_file=A ,do_lower_case=A ,unk_token=A ,sep_token=A ,pad_token=A ,cls_token=A ,mask_token=A ,tokenize_chinese_chars=A ,strip_accents=A ,**A ,)
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,A ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,A ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,A ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(A ,normalizer_state.pop("""type""" ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**A )
UpperCAmelCase = do_lower_case
def _UpperCamelCase ( self ,A ,A=None ):
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self ,A ,A = None ):
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self ,A ,A = None ):
UpperCAmelCase = self._tokenizer.model.save(A ,name=A )
return tuple(A )
| 714 |
"""simple docstring"""
def _a ( _snake_case ): # noqa: E741
"""simple docstring"""
UpperCAmelCase = len(_snake_case )
UpperCAmelCase = 0
UpperCAmelCase = [0] * n
UpperCAmelCase = [False] * n
UpperCAmelCase = [False] * n
def dfs(_snake_case , _snake_case , _snake_case , _snake_case ):
if parent == root:
out_edge_count += 1
UpperCAmelCase = True
UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
UpperCAmelCase = True
else:
                UpperCAmelCase = min(low[at] , to )
return out_edge_count
for i in range(_snake_case ):
if not visited[i]:
UpperCAmelCase = 0
UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case )
UpperCAmelCase = out_edge_count > 1
for x in range(len(_snake_case ) ):
if is_art[x] is True:
print(_snake_case )
# Adjacency list of graph
_UpperCamelCase = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
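# For the adjacency list above, the articulation points (assuming the intended
# Tarjan-style low-link bookkeeping behind the collapsed assignments) are 2, 3
# and 5: removing 3 isolates vertex 4, while 2 and 5 are the only vertices
# joining the triangle {0, 1, 2}, the chain 2-3-4 and the cycle 5-6-7-8.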
| 74 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = inspect.getfile(accelerate.test_utils )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices="""0,1""" ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
if __name__ == "__main__":
_UpperCamelCase = Accelerator()
_UpperCamelCase = (accelerator.state.process_index + 2, 10)
_UpperCamelCase = torch.randint(0, 10, shape).to(accelerator.device)
_UpperCamelCase = """"""
_UpperCamelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_UpperCamelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_UpperCamelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 715 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A_ )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A_ ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCamelCase__ :
def __init__( self ,A ):
UpperCAmelCase = data
# Initialize hash values
UpperCAmelCase = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
UpperCAmelCase = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
UpperCAmelCase = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( A ):
UpperCAmelCase = b"""\x80""" + (b"""\x00""" * (63 - (len(A ) + 8) % 64))
UpperCAmelCase = struct.pack(""">Q""" ,(len(A ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self ):
# Convert into blocks of 64 bytes
UpperCAmelCase = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            UpperCAmelCase = list(struct.unpack(""">16L""" ,block ) )
# add 48 0-ed integers
words += [0] * 48
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCAmelCase = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
UpperCAmelCase = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
UpperCAmelCase = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
                UpperCAmelCase = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
UpperCAmelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
UpperCAmelCase = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
                UpperCAmelCase = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase = (sa + maj) % 0x1_00_00_00_00
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
                    ((tempa + tempb) % 0x1_00_00_00_00),  # temp1 + temp2; the doubled `tempa` would corrupt the digest (tempb is the `sa + maj` temporary above)
)
UpperCAmelCase = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
UpperCAmelCase = """""".join([hex(A )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self ,A ,A ):
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
import hashlib
UpperCAmelCase = bytes("""Test String""" ,"""utf-8""" )
self.assertEqual(SHAaaa(A ).hash ,hashlib.shaaaa(A ).hexdigest() )
def _a ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = bytes(_snake_case , """utf-8""" )
print(SHAaaa(_snake_case ).hash )
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_snake_case ) )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if index == len(_snake_case ):
return True
# Recursive Step
for i in range(_snake_case ):
if valid_coloring(graph[index] , _snake_case , _snake_case ):
# Color current vertex
UpperCAmelCase = i
# Validate coloring
if util_color(_snake_case , _snake_case , _snake_case , index + 1 ):
return True
# Backtrack
UpperCAmelCase = -1
return False
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = [-1] * len(_snake_case )
if util_color(_snake_case , _snake_case , _snake_case , 0 ):
return colored_vertices
return []
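# Hedged usage sketch (the three helpers above are `valid_coloring`,
# `util_color` and `color` in the original; here they all share the name `_a`).
# A triangle needs three colors:
#     >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3)
#     [0, 1, 2]
#     >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 2)
#     []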
| 717 |
"""simple docstring"""
def _a ( _snake_case = 10 , _snake_case = 22 ):
"""simple docstring"""
UpperCAmelCase = range(1 , _snake_case )
UpperCAmelCase = range(1 , _snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
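# Project Euler 63: counts n-digit positive integers that are also n-th powers.
# Only bases 1-9 can qualify (10**n already has n + 1 digits), and powers above
# 21 are always too short, so the default bounds are exhaustive; the expected
# result of solution(10, 22) is the known answer, 49.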
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_UpperCamelCase = ["""bert-base-uncased""", """bert-base-cased"""]
_UpperCamelCase = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowerCamelCase__ ( tf.keras.Model ):
def __init__( self ,A ):
super().__init__()
UpperCAmelCase = tokenizer
UpperCAmelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
UpperCAmelCase = TFAutoModel.from_config(lowerCamelCase__ )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = self.tokenizer(lowerCamelCase__ )
UpperCAmelCase = self.bert(**lowerCamelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
super().setUp()
UpperCAmelCase = [
BertTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCAmelCase = [TFBertTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowerCamelCase__ ,use_fast_bert_tokenizer=lowerCamelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def _UpperCamelCase ( self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase = tokenizer(lowerCamelCase__ ,return_tensors="""tf""" ,padding="""longest""" )
UpperCAmelCase = tf_tokenizer(lowerCamelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.intaa ) == tf_outputs[key] ) )
@slow
def _UpperCamelCase ( self ):
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = tf_tokenizer(self.paired_sentences )
UpperCAmelCase = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,)
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.intaa ) == separated_outputs[key] ) )
@slow
def _UpperCamelCase ( self ):
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = tf.function(lowerCamelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase = tf.constant(lowerCamelCase__ )
UpperCAmelCase = compiled_tokenizer(lowerCamelCase__ )
UpperCAmelCase = tf_tokenizer(lowerCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _UpperCamelCase ( self ):
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = ModelToSave(tokenizer=lowerCamelCase__ )
UpperCAmelCase = tf.convert_to_tensor(self.test_sentences )
UpperCAmelCase = model(lowerCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase = Path(lowerCamelCase__ ) / "saved.model"
model.save(lowerCamelCase__ )
UpperCAmelCase = tf.keras.models.load_model(lowerCamelCase__ )
UpperCAmelCase = loaded_model(lowerCamelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1e-5 )
| 718 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
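# Hedged usage sketch (the helper above is `all_unique` in the original; it
# requires hashable elements):
#     >>> _a([1, 2, 3])
#     True
#     >>> _a([1, 2, 2])
#     False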
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = F'''{sampling_rate}'''
UpperCAmelCase = """1"""
UpperCAmelCase = """f32le"""
UpperCAmelCase = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase = ffmpeg_process.communicate(lowercase__ )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
UpperCAmelCase = output_stream[0]
UpperCAmelCase = np.frombuffer(lowercase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
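# Hedged usage sketch for the decoder above (`ffmpeg_read` in the original):
# feed it the raw bytes of any audio file plus a target sampling rate, and it
# returns a mono float32 waveform decoded via the system ffmpeg binary, e.g.
#     with open("sample.flac", "rb") as f:   # hypothetical input file
#         waveform = ffmpeg_read(f.read(), 16_000)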
def _a ( _snake_case , _snake_case , _snake_case = "f32le" , ):
"""simple docstring"""
UpperCAmelCase = F'''{sampling_rate}'''
UpperCAmelCase = """1"""
if format_for_conversion == "s16le":
UpperCAmelCase = 2
elif format_for_conversion == "f32le":
UpperCAmelCase = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase = platform.system()
if system == "Linux":
UpperCAmelCase = """alsa"""
UpperCAmelCase = """default"""
elif system == "Darwin":
UpperCAmelCase = """avfoundation"""
UpperCAmelCase = """:0"""
elif system == "Windows":
UpperCAmelCase = """dshow"""
UpperCAmelCase = """default"""
UpperCAmelCase = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase = _ffmpeg_stream(lowercase__ , lowercase__ )
for item in iterator:
yield item
def _a ( _snake_case , _snake_case , _snake_case = None , _snake_case = None , _snake_case = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
UpperCAmelCase = stream_chunk_s
else:
UpperCAmelCase = chunk_length_s
UpperCAmelCase = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ )
if format_for_conversion == "s16le":
UpperCAmelCase = np.intaa
UpperCAmelCase = 2
elif format_for_conversion == "f32le":
UpperCAmelCase = np.floataa
UpperCAmelCase = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase = chunk_length_s / 6
UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowercase__ , (int, float) ):
UpperCAmelCase = [stride_length_s, stride_length_s]
UpperCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase = datetime.datetime.now()
UpperCAmelCase = datetime.timedelta(seconds=lowercase__ )
for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ):
# Put everything back in numpy scale
UpperCAmelCase = np.frombuffer(item["""raw"""] , dtype=lowercase__ )
UpperCAmelCase = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
UpperCAmelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _a ( _snake_case , _snake_case , _snake_case , _snake_case = False ):
"""simple docstring"""
UpperCAmelCase = b""""""
UpperCAmelCase , UpperCAmelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase = 0
for raw in iterator:
acc += raw
if stream and len(lowercase__ ) < chunk_len:
UpperCAmelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowercase__ ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase = (_stride_left, stride_right)
UpperCAmelCase = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
UpperCAmelCase = False
yield item
UpperCAmelCase = stride_left
UpperCAmelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowercase__ ) > stride_left:
UpperCAmelCase = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
UpperCAmelCase = False
yield item
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
    UpperCAmelCase = 2**24  # 16 MB
try:
with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process:
while True:
UpperCAmelCase = ffmpeg_process.stdout.read(lowercase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 719 |
"""simple docstring"""
import math
def _a ( _snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( _snake_case = 0.1 ):
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
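# Project Euler 58: starting from a 3x3 square spiral, keep adding layers and
# counting primes among the new corner values until the proportion of primes
# along both diagonals first falls below `ratio`; the return value is the side
# length, and for the default ratio of 10% the known answer is 26241.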
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'facebook/data2vec-audio-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase__ ( __lowercase ):
SCREAMING_SNAKE_CASE = '''data2vec-audio'''
def __init__( self ,A=32 ,A=768 ,A=12 ,A=12 ,A=3_072 ,A="gelu" ,A=0.1 ,A=0.1 ,A=0.1 ,A=0.0 ,A=0.1 ,A=0.1 ,A=0.02 ,A=1e-5 ,A="gelu" ,A=(512, 512, 512, 512, 512, 512, 512) ,A=(5, 2, 2, 2, 2, 2, 2) ,A=(10, 3, 3, 3, 3, 2, 2) ,A=False ,A=16 ,A=19 ,A=5 ,A=0.05 ,A=10 ,A=2 ,A=0.0 ,A=10 ,A=0 ,A="sum" ,A=False ,A=False ,A=256 ,A=(512, 512, 512, 512, 1_500) ,A=(5, 3, 3, 1, 1) ,A=(1, 2, 3, 1, 1) ,A=512 ,A=0 ,A=1 ,A=2 ,A=False ,A=3 ,A=2 ,A=3 ,A=None ,**A ,):
super().__init__(**__a ,pad_token_id=__a ,bos_token_id=__a ,eos_token_id=__a )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(__a )
UpperCAmelCase = list(__a )
UpperCAmelCase = list(__a )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = conv_pos_kernel_size
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = vocab_size
UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# adapter
UpperCAmelCase = add_adapter
UpperCAmelCase = adapter_kernel_size
UpperCAmelCase = adapter_stride
UpperCAmelCase = num_adapter_layers
UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = list(__a )
UpperCAmelCase = list(__a )
UpperCAmelCase = list(__a )
UpperCAmelCase = xvector_output_dim
@property
def _UpperCamelCase ( self ):
return math.prod(self.conv_stride )
| 720 |
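# Two invariants from the config above, pulled out as a standalone check: the
# conv_dim / conv_kernel / conv_stride tuples must have equal lengths, and the
# product of the strides is the samples-per-frame downsampling ratio that the
# final @property computes. A minimal sketch (helper name is illustrative):
import math

def validate_feature_extractor(conv_dim, conv_kernel, conv_stride):
    if not (len(conv_dim) == len(conv_kernel) == len(conv_stride)):
        raise ValueError(
            "Configuration for convolutional layers is incorrect: "
            f"len(conv_dim)={len(conv_dim)}, len(conv_stride)={len(conv_stride)}, "
            f"len(conv_kernel)={len(conv_kernel)}"
        )
    return math.prod(conv_stride)  # input samples consumed per output frame

# Defaults from the __init__ signature above: 5*2*2*2*2*2*2 = 320 samples/frame.
print(validate_feature_extractor((512,) * 7, (10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)))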
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE = '''CLIPImageProcessor'''
SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self ,A=None ,A=None ,**A ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,A ,)
UpperCAmelCase = kwargs.pop("""feature_extractor""" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A ,A )
def __call__( self ,A=None ,A=None ,A=None ,**A ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A )
if images is not None:
UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.batch_decode(*A ,**A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.decode(*A ,**A )
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,)
return self.image_processor_class
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,)
return self.image_processor
| 74 | 0 |
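# The __call__ above follows a common dispatch pattern: tokenizer output for
# text, image-processor output for images, and a merged encoding when both are
# present. A framework-free sketch with dummy components (all names here are
# hypothetical, not the transformers API):
class DummyTokenizer:
    def __call__(self, text, **kwargs):
        return {"input_ids": [[len(word) for word in t.split()] for t in text]}

class DummyImageProcessor:
    def __call__(self, images, **kwargs):
        return {"pixel_values": [list(image) for image in images]}

class MiniProcessor:
    def __init__(self, image_processor, tokenizer):
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        encoding = self.tokenizer(text, **kwargs) if text is not None else None
        image_features = self.image_processor(images, **kwargs) if images is not None else None
        if encoding is not None and image_features is not None:
            encoding["pixel_values"] = image_features["pixel_values"]
            return encoding
        return encoding if encoding is not None else image_features

processor = MiniProcessor(DummyImageProcessor(), DummyTokenizer())
print(processor(text=["a photo of a cat"], images=[(0, 127, 255)]))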
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase = model.generate(UpperCAmelCase__ ,max_new_tokens=10 ,do_sample=UpperCAmelCase__ )
UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(UpperCAmelCase__ )
model.generate(UpperCAmelCase__ ,max_new_tokens=10 ,do_sample=UpperCAmelCase__ ,streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase = model.generate(UpperCAmelCase__ ,max_new_tokens=10 ,do_sample=UpperCAmelCase__ )
UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase = TextIteratorStreamer(UpperCAmelCase__ )
UpperCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase = Thread(target=model.generate ,kwargs=UpperCAmelCase__ )
thread.start()
UpperCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase = model.generate(UpperCAmelCase__ ,max_new_tokens=10 ,do_sample=UpperCAmelCase__ )
UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(UpperCAmelCase__ ,skip_prompt=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ ,max_new_tokens=10 ,do_sample=UpperCAmelCase__ ,streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(UpperCAmelCase__ )
UpperCAmelCase = -1
UpperCAmelCase = torch.ones((1, 5) ,device=UpperCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ ,max_new_tokens=1 ,do_sample=UpperCAmelCase__ ,streamer=UpperCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase = tokenizer(UpperCAmelCase__ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase = TextIteratorStreamer(UpperCAmelCase__ ,timeout=0.001 )
UpperCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase = Thread(target=model.generate ,kwargs=UpperCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase__ ):
UpperCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text
| 721 |
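# The TextIteratorStreamer exercised above boils down to a thread-safe queue
# with a stop sentinel. A stdlib-only sketch of that pattern (class and helper
# names are illustrative):
from queue import Queue
from threading import Thread

class MiniTextIteratorStreamer:
    _SENTINEL = object()

    def __init__(self, timeout=None):
        self.text_queue = Queue()
        self.timeout = timeout

    def put(self, text):
        self.text_queue.put(text)

    def end(self):
        self.text_queue.put(self._SENTINEL)

    def __iter__(self):
        return self

    def __next__(self):
        # Raises queue.Empty if nothing arrives within `timeout`, which is the
        # behaviour the timeout test above asserts against.
        value = self.text_queue.get(timeout=self.timeout)
        if value is self._SENTINEL:
            raise StopIteration
        return value

def fake_generate(streamer):
    for token in ["Hello", " ", "world"]:
        streamer.put(token)
    streamer.end()

streamer = MiniTextIteratorStreamer(timeout=1.0)
Thread(target=fake_generate, args=(streamer,)).start()
print("".join(streamer))  # -> "Hello world"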
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _a ( _snake_case = "mumbai" ):
"""simple docstring"""
UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 74 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( __a ):
SCREAMING_SNAKE_CASE = (PNDMScheduler,)
SCREAMING_SNAKE_CASE = (('''num_inference_steps''', 50),)
def _UpperCamelCase ( self ,**A ):
UpperCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**A__ )
return config
def _UpperCamelCase ( self ,A=0 ,**A ):
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop("""num_inference_steps""" ,A__ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**A__ )
UpperCAmelCase = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
UpperCAmelCase = scheduler_class.from_pretrained(A__ )
new_scheduler.set_timesteps(A__ )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(A__ ,A__ ,A__ ,**A__ ).prev_sample
UpperCAmelCase = new_scheduler.step_prk(A__ ,A__ ,A__ ,**A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step_plms(A__ ,A__ ,A__ ,**A__ ).prev_sample
UpperCAmelCase = new_scheduler.step_plms(A__ ,A__ ,A__ ,**A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ,A=0 ,**A ):
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop("""num_inference_steps""" ,A__ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
UpperCAmelCase = scheduler_class.from_pretrained(A__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(A__ ,A__ ,A__ ,**A__ ).prev_sample
UpperCAmelCase = new_scheduler.step_prk(A__ ,A__ ,A__ ,**A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step_plms(A__ ,A__ ,A__ ,**A__ ).prev_sample
UpperCAmelCase = new_scheduler.step_plms(A__ ,A__ ,A__ ,**A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self ,**A ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**A__ )
UpperCAmelCase = scheduler_class(**A__ )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(A__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase = model(A__ ,A__ )
UpperCAmelCase = scheduler.step_prk(A__ ,A__ ,A__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase = model(A__ ,A__ )
UpperCAmelCase = scheduler.step_plms(A__ ,A__ ,A__ ).prev_sample
return sample
def _UpperCamelCase ( self ):
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop("""num_inference_steps""" ,A__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A__ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,"""set_timesteps""" ):
scheduler.set_timesteps(A__ )
elif num_inference_steps is not None and not hasattr(A__ ,"""set_timesteps""" ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(A__ ,0 ,A__ ,**A__ ).prev_sample
UpperCAmelCase = scheduler.step_prk(A__ ,1 ,A__ ,**A__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase = scheduler.step_plms(A__ ,0 ,A__ ,**A__ ).prev_sample
UpperCAmelCase = scheduler.step_plms(A__ ,1 ,A__ ,**A__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _UpperCamelCase ( self ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=A__ )
def _UpperCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A__ )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase = scheduler_class(**A__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def _UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ):
self.check_over_configs(beta_start=A__ ,beta_end=A__ )
def _UpperCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def _UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def _UpperCamelCase ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=A__ )
def _UpperCamelCase ( self ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=A__ )
def _UpperCamelCase ( self ):
# an earlier version of set_timesteps() caused an error when indexing alphas with inference steps as a power of 3
UpperCAmelCase = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# before the power-of-3 fix, this would error on the first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase = scheduler.step_prk(A__ ,A__ ,A__ ).prev_sample
def _UpperCamelCase ( self ):
with self.assertRaises(A__ ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def _UpperCamelCase ( self ):
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.sum(torch.abs(A__ ) )
UpperCAmelCase = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase = torch.sum(torch.abs(A__ ) )
UpperCAmelCase = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def _UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase = self.full_loop(set_alpha_to_one=A__ ,beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(A__ ) )
UpperCAmelCase = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def _UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase = self.full_loop(set_alpha_to_one=A__ ,beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(A__ ) )
UpperCAmelCase = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 700 |
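# Most tests above share one skeleton: run a step, save the config, reload it,
# run the same step again, and assert the two outputs agree within tolerance.
# The same skeleton with a toy scheduler (all names here are illustrative):
import json
import os
import tempfile

class ToyScheduler:
    def __init__(self, scale=0.5):
        self.config = {"scale": scale}

    def step(self, sample):
        return sample * self.config["scale"]

    def save_config(self, directory):
        with open(os.path.join(directory, "scheduler_config.json"), "w") as f:
            json.dump(self.config, f)

    @classmethod
    def from_pretrained(cls, directory):
        with open(os.path.join(directory, "scheduler_config.json")) as f:
            return cls(**json.load(f))

scheduler = ToyScheduler(scale=0.25)
output = scheduler.step(8.0)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    new_scheduler = ToyScheduler.from_pretrained(tmpdir)
new_output = new_scheduler.step(8.0)
assert abs(output - new_output) < 1e-5, "Scheduler outputs are not identical"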
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A )
self.assertEqual(A ,["""c"""] )
self.assertEqual(A ,[2] )
# Out indices set to match out features
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features set to match out indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features selected from negative indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A )
# Out features must be a list
with self.assertRaises(A ):
verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(A ):
verify_out_features_out_indices(A ,0 ,["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(A ,(0, 1) ,["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(A ):
verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ["""a""", """b""", """c"""]
UpperCAmelCase = ["""a""", """c"""]
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ["""a""", """b"""]
self.assertEqual(backbone.out_features ,["""a""", """b"""] )
self.assertEqual(backbone.out_indices ,[0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
| 74 | 0 |
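# A sketch of the alignment rule the first test above pins down: default to
# the last stage when both arguments are None, otherwise derive the missing
# argument from the other against the ordered stage names. Not the library's
# exact implementation, but consistent with the values in the assertions:
def align_output_features_output_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        # Negative indices are kept verbatim but resolved to pick the names.
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(name) for name in out_features]
    return list(out_features), list(out_indices)

stages = ["a", "b", "c"]
print(align_output_features_output_indices(None, None, stages))        # (['c'], [2])
print(align_output_features_output_indices(["a", "c"], None, stages))  # (['a', 'c'], [0, 2])
print(align_output_features_output_indices(None, [-3, -1], stages))    # (['a', 'c'], [-3, -1])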
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = 'ClapFeatureExtractor'
SCREAMING_SNAKE_CASE = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self ,A ,A ):
super().__init__(A ,A )
def __call__( self ,A=None ,A=None ,A=None ,**A ):
UpperCAmelCase = kwargs.pop("""sampling_rate""" ,A )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A )
if audios is not None:
UpperCAmelCase = self.feature_extractor(
A ,sampling_rate=A ,return_tensors=A ,**A )
if text is not None and audios is not None:
UpperCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.batch_decode(*A ,**A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.decode(*A ,**A )
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 701 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
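# The linked-list queue above implements a fixed-capacity ring. The same idea
# fits in a few lines with an array backing; a compact sketch for comparison:
class RingQueue:
    def __init__(self, capacity=6):
        self.data = [None] * capacity
        self.front = 0
        self.size = 0

    def is_empty(self):
        return self.size == 0

    def enqueue(self, value):
        if self.size == len(self.data):
            raise Exception("Full Queue")
        self.data[(self.front + self.size) % len(self.data)] = value
        self.size += 1

    def dequeue(self):
        if self.is_empty():
            raise Exception("Empty Queue")
        value = self.data[self.front]
        self.data[self.front] = None  # free the slot, mirroring the None reset above
        self.front = (self.front + 1) % len(self.data)
        self.size -= 1
        return value

queue = RingQueue(capacity=3)
for value in (1, 2, 3):
    queue.enqueue(value)
print(queue.dequeue(), queue.dequeue())  # 1 2
queue.enqueue(4)  # the rear wraps around the ring
print(queue.dequeue(), queue.dequeue())  # 3 4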
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
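# The _LazyModule wiring above defers the actual tokenizer imports until first
# use. A stripped-down sketch of that mechanism (the module and attribute
# names below are illustrative, not the transformers implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # import_structure: {submodule_path: [attribute names exported from it]}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

# json.dumps is only imported the moment it is first accessed.
lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))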
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
UpperCAmelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(_snake_case )
UpperCAmelCase = torch.load(_snake_case )
load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
UpperCAmelCase = np.load(_snake_case )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
model.save_pretrained(_snake_case )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 74 | 0 |
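# The load_weights function above is a hand-rolled instance of a generic
# pattern: walk a flat mapping from source checkpoint keys to destination
# attribute paths and copy the values across. A dict-to-dict sketch of the
# idea (the mapping and keys below are hypothetical):
def remap_checkpoint(src_state, key_mapping):
    dst_state = {}
    for src_key, value in src_state.items():
        for prefix, replacement in key_mapping.items():
            if src_key.startswith(prefix):
                dst_state[replacement + src_key[len(prefix):]] = value
                break
        else:
            print(f"Unused weight: {src_key}")
    return dst_state

mapping = {"input_conv.": "conv_pre.", "output_conv.1.": "conv_post."}
source = {"input_conv.bias": [0.0], "output_conv.1.weight_g": [1.0], "extra.w": [2.0]}
print(remap_checkpoint(source, mapping))
# {'conv_pre.bias': [0.0], 'conv_post.weight_g': [1.0]} plus a warning for extra.w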
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if n_term == "":
return []
UpperCAmelCase = []
for temp in range(int(lowerCAmelCase_ ) ):
series.append(F'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
_UpperCamelCase = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 703 |
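# The generator above only formats the terms as strings; computing the exact
# partial sum H_n is a natural companion. A short sketch using fractions:
from fractions import Fraction

def harmonic_number(n: int) -> Fraction:
    return sum(Fraction(1, k) for k in range(1, n + 1))

print(harmonic_number(4))  # 25/12, i.e. 1 + 1/2 + 1/3 + 1/4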
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_UpperCamelCase = concatenate_datasets
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadManager
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 74 | 0 |
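# The import-time guards above compare versions with packaging.version rather
# than plain strings, since string comparison would get "3.10" < "3.7" wrong.
# The same check factored into a helper (the helper name is illustrative):
import platform
from packaging import version

def require_min_version(name, current, minimum):
    if version.parse(current) < version.parse(minimum):
        raise ImportWarning(f"{name}>={minimum} is required, but {current} was found.")

require_min_version("Python", platform.python_version(), "3.7")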
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
for attribute in key.split(""".""" ):
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase = None
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase = True
elif name.split(""".""" )[0] == "proj":
UpperCAmelCase = fairseq_model.proj
UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2]
UpperCAmelCase = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
UpperCAmelCase = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase = """weight_v"""
elif "bias" in name:
UpperCAmelCase = """bias"""
elif "weight" in name:
UpperCAmelCase = """weight"""
else:
UpperCAmelCase = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase = name.split(""".""" )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = emb.weight.shape
UpperCAmelCase = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = emb.weight.data
return lin_layer
def _a ( _snake_case ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [line.split(""" """ )[0] for line in lines]
UpperCAmelCase = len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
UpperCAmelCase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , vocab_size=SCREAMING_SNAKE_CASE__ , decoder_layers=SCREAMING_SNAKE_CASE__ , do_stable_layer_norm=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
UpperCAmelCase = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase = WavaVecaModel(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
UpperCAmelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
UpperCAmelCase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = False
# add projection layer
UpperCAmelCase = nn.Parameter(projection_layer.weight )
UpperCAmelCase = nn.Parameter(projection_layer.bias )
UpperCAmelCase = create_vocab_dict(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = hf_wavavec.config.to_dict()
UpperCAmelCase = tokenizer.pad_token_id
UpperCAmelCase = tokenizer.bos_token_id
UpperCAmelCase = tokenizer.eos_token_id
UpperCAmelCase = """speech_to_text_2"""
UpperCAmelCase = """wav2vec2"""
UpperCAmelCase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
_UpperCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 704 |
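# The create_vocab_dict step above reads a fairseq dictionary file of
# "<token> <count>" lines and prepends the four special tokens. A runnable
# sketch of that step against a throwaway file:
import tempfile

def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        words = [line.split(" ")[0] for line in f]
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    vocab.update({word: i + 4 for i, word in enumerate(words)})
    return vocab

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("hello 120\nworld 87\n")
print(create_vocab_dict(f.name))
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}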
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
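# The divisor sum above scans every candidate up to n // 2. Walking divisor
# pairs up to the square root gives the same result in O(sqrt(n)); a sketch:
import math

def sum_proper_divisors(n: int) -> int:
    if not isinstance(n, int):
        raise ValueError("Input must be an integer")
    if n <= 0:
        raise ValueError("Input must be positive")
    total = 1 if n > 1 else 0  # 1 divides every n > 1
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            total += d
            partner = n // d
            if partner != d:
                total += partner
    return total

assert sum_proper_divisors(28) == 28  # perfect number: 1 + 2 + 4 + 7 + 14
assert sum_proper_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6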
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCamelCase = 250004
_UpperCamelCase = 250020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MBartaaTokenizer
SCREAMING_SNAKE_CASE = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartaaTokenizer(__snake_case ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ):
UpperCAmelCase = '''<s>'''
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) ,__snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) ,__snake_case )
def _UpperCamelCase ( self ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(__snake_case ) ,1_054 )
def _UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_054 )
def _UpperCamelCase ( self ):
UpperCAmelCase = MBartaaTokenizer(__snake_case ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=__snake_case )
UpperCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,)
UpperCAmelCase = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,)
@slow
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = {'''input_ids''': [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name="""facebook/mbart-large-50""" ,revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" ,)
def _UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
UpperCAmelCase = self.tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__snake_case )
UpperCAmelCase = tokenizer_p.save_pretrained(__snake_case )
# Checks it saves the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__snake_case ,__snake_case )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__snake_case )
UpperCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case ,__snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__snake_case ,legacy_format=__snake_case )
UpperCAmelCase = tokenizer_p.save_pretrained(__snake_case )
# Checks it saves the same files
self.assertSequenceEqual(__snake_case ,__snake_case )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__snake_case )
UpperCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case ,__snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__snake_case ,legacy_format=__snake_case )
UpperCAmelCase = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__snake_case )
UpperCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case ,__snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = '''facebook/mbart-large-50-one-to-many-mmt'''
SCREAMING_SNAKE_CASE = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
SCREAMING_SNAKE_CASE = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _UpperCamelCase ( cls ):
UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" )
UpperCAmelCase = 1
return cls
def _UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,250_038 )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,__snake_case )
def _UpperCamelCase ( self ):
self.assertIn(__snake_case ,self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase = self.tokenizer.decode(__snake_case ,skip_special_tokens=__snake_case )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertNotIn(self.tokenizer.eos_token ,__snake_case )
def _UpperCamelCase ( self ):
UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] ,__snake_case )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(__snake_case ,max_length=__snake_case ,truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[0] ,__snake_case )
self.assertEqual(ids[-1] ,2 )
self.assertEqual(len(__snake_case ) ,__snake_case )
def _UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250_053, 250_001] )
def _UpperCamelCase ( self ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
UpperCAmelCase = MBartaaTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,__snake_case )
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__snake_case ,return_tensors="""pt""" )
UpperCAmelCase = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=__snake_case ,truncation=__snake_case ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
UpperCAmelCase = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case ,__snake_case )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,__snake_case )
self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(self.src_text ,padding=__snake_case ,truncation=__snake_case ,max_length=3 ,return_tensors="""pt""" )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text ,padding=__snake_case ,truncation=__snake_case ,max_length=10 ,return_tensors="""pt""" )
UpperCAmelCase = targets['''input_ids''']
UpperCAmelCase = shift_tokens_right(__snake_case ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__snake_case ) ,{
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} ,)
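# A minimal, standalone sketch (not part of the test class above) of the
# seq2seq convention the assertions encode: MBart-50 sources are wrapped as
# [src_lang_code, ...tokens, eos] and decoder inputs start [eos, tgt_lang_code].
# The ids reuse en_XX = 250_004, ar_AR = 250_001 and eos = 2 from the tests.
EN_XX, AR_AR, EOS = 250_004, 250_001, 2
encoder_input = [EN_XX, 62, 3_034, EOS]  # "A test", per the assertion above
decoder_start = [EOS, AR_AR]  # the shape shift_tokens_right produces
assert encoder_input[0] == EN_XX and encoder_input[-1] == EOS
assert decoder_start[0] == EOS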
| 705 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
_UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _UpperCamelCase ( self ):
UpperCAmelCase = {}
if self.train_dir is not None:
UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase = self.validation_dir
UpperCAmelCase = data_files if data_files else None
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class lowerCamelCase__ :
def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ):
UpperCAmelCase = input_size
UpperCAmelCase = mask_patch_size
UpperCAmelCase = model_patch_size
UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
UpperCAmelCase = self.input_size // self.mask_patch_size
UpperCAmelCase = self.mask_patch_size // self.model_patch_size
UpperCAmelCase = self.rand_size**2
UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase = np.zeros(self.token_count ,dtype=A )
UpperCAmelCase = 1
UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
return torch.tensor(mask.flatten() )
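# Hedged numeric sketch of the geometry the generator above relies on, using
# its default arguments (input_size=192, mask_patch_size=32, model_patch_size=4,
# mask_ratio=0.6): the image is a 6x6 grid of maskable patches, each covering
# an 8x8 block of model patches, and ceil(36 * 0.6) = 22 of them get masked.
_rand_size, _scale = 192 // 32, 32 // 4
_token_count = _rand_size**2
_mask_count = int(np.ceil(_token_count * 0.6))
assert (_rand_size, _scale, _token_count, _mask_count) == (6, 8, 36, 22)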
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] )
UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , _snake_case , _snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase = split["""train"""]
UpperCAmelCase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_snake_case , """decoder_type""" ):
UpperCAmelCase = """simmim"""
# adapt config
UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case )
if training_args.do_train:
UpperCAmelCase = ds["""train"""].column_names
else:
UpperCAmelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase = """image"""
elif "img" in column_names:
UpperCAmelCase = """img"""
else:
UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase = Compose(
[
Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
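    # A hedged, standalone rerun of the pipeline above on a dummy image; the
    # ImageNet mean/std constants here are an assumption standing in for
    # image_processor.image_mean/std, which depend on the loaded processor.
    from PIL import Image
    _demo_transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(192, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    assert tuple(_demo_transforms(Image.new("L", (256, 256))).shape) == (3, 192, 192)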
# create mask generator
UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_snake_case ):
UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]]
UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , _snake_case )
trainer.save_metrics("""eval""" , _snake_case )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
if __name__ == "__main__":
main()
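# Hedged usage note: a typical launch of this script (flags illustrative, not
# exhaustive) might look like
#   python run_mim.py --dataset_name cifar10 --output_dir ./simmim-out \
#       --do_train --do_eval --overwrite_output_dir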
| 74 | 0 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
UpperCAmelCase = 1
UpperCAmelCase = 1
while repunit:
UpperCAmelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
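# Hedged, self-contained sanity check of the loop above (Project Euler 129):
# the smallest repunit divisible by 7 is R(6) = 111111 = 7 * 15873, and the
# smallest divisible by 41 is R(5) = 11111 = 41 * 271.
def _repunit_length(divisor: int) -> int:
    remainder, length = 1 % divisor, 1
    while remainder:
        remainder = (10 * remainder + 1) % divisor
        length += 1
    return length
assert _repunit_length(7) == 6 and _repunit_length(41) == 5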
def _a ( _snake_case = 1_000_000 ):
    """Project Euler 129: return the least divisor (coprime to 10) whose smallest divisible repunit has more than ``limit`` digits; the search starts just below ``limit`` and steps through odd candidates."""
UpperCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_snake_case ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 706 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
UpperCAmelCase = logging.WARNING
if model_args.verbose_logging:
UpperCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
UpperCAmelCase = logging.INFO
logger.setLevel(_snake_case )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
SCREAMING_SNAKE_CASE = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
SCREAMING_SNAKE_CASE = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self ,A ):
# reformat list to dict and set to pytorch format
UpperCAmelCase = self.feature_extractor.pad(
A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
UpperCAmelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
UpperCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
UpperCAmelCase = 1
UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
UpperCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A ,min_masks=2 ,)
return batch
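# Hedged, standalone sketch of the flip/cumsum/flip trick used above: it turns
# a one-hot marker at each sequence's last valid frame into a full prefix mask.
_m = torch.zeros(2, 6, dtype=torch.long)
_m[0, 3] = 1  # sequence 0 is 4 frames long
_m[1, 5] = 1  # sequence 1 is 6 frames long
_prefix = _m.flip([-1]).cumsum(-1).flip([-1]).bool()
assert _prefix.long().tolist() == [[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1]]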
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ):
super().__init__(*A ,**A )
UpperCAmelCase = 0
UpperCAmelCase = max_gumbel_temp
UpperCAmelCase = min_gumbel_temp
UpperCAmelCase = gumbel_temp_decay
def _UpperCamelCase ( self ,A ,A ):
model.train()
UpperCAmelCase = self._prepare_inputs(A )
if self.use_amp:
with autocast():
UpperCAmelCase = self.compute_loss(A ,A )
else:
UpperCAmelCase = self.compute_loss(A ,A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
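# Hedged numeric sketch of the schedule applied above:
# temperature(step) = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp),
# i.e. exponential decay clamped at a floor. With the ModelArguments defaults
# (2.0, 0.999995, 0.5) the floor is reached after roughly 277k update steps.
_max_t, _min_t, _decay = 2.0, 0.5, 0.999995
assert max(_max_t * _decay**0, _min_t) == 2.0
assert max(_max_t * _decay**1_000_000, _min_t) == 0.5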
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_snake_case , _snake_case )
# Downloading and loading a dataset from the hub.
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case )
def prepare_dataset(_snake_case ):
# check that all files have the correct sampling rate
UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
UpperCAmelCase = datasets.map(
_snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
UpperCAmelCase = vectorized_datasets.filter(
lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_snake_case ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
UpperCAmelCase = vectorized_datasets.map(
_snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
UpperCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
UpperCAmelCase = WavaVecaForPreTraining(_snake_case )
UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case )
UpperCAmelCase = WavaVecaPreTrainer(
model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = inspect.getfile(accelerate.test_utils )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCAmelCase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _UpperCamelCase ( self ):
UpperCAmelCase = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCAmelCase = [sys.executable] + distributed_args
execute_subprocess_async(__a ,env=os.environ.copy() )
| 707 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = 30
UpperCAmelCase = self.seq_length + self.mem_len
UpperCAmelCase = 15
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = [10, 50, 80]
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = 8
UpperCAmelCase = 128
UpperCAmelCase = 2
UpperCAmelCase = 2
UpperCAmelCase = None
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 3
UpperCAmelCase = self.vocab_size - 1
UpperCAmelCase = 0.01
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLLMHeadModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLForSequenceClassification(A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCAmelCase = model.get_output_embeddings()
assert isinstance(A ,tf.keras.layers.Layer )
UpperCAmelCase = model.get_bias()
assert name is None
else:
UpperCAmelCase = model.get_output_embeddings()
assert x is None
UpperCAmelCase = model.get_bias()
assert name is None
def _UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _UpperCamelCase ( self ):
pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 74 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __init__( self ,*A ,**A ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 708 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 1_000_000 ):
    """Project Euler 86: return the least M for which the number of cuboids (with sides up to M) having an integer shortest surface path first exceeds ``limit``."""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
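# Hedged check of the criterion counted above, using Project Euler 86's worked
# example: a 6 x 5 x 3 cuboid has shortest surface path
# sqrt((5 + 3)**2 + 6**2) = 10, the classic integer case.
assert sqrt((5 + 3) ** 2 + 6**2).is_integer()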
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
_UpperCamelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
_UpperCamelCase = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
_UpperCamelCase = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_UpperCamelCase = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 709 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
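# Hedged illustration of the renaming above on a toy key (not a real CLAP
# weight name): "sequential.3" maps to transformers layer 3 // 3 = 1.
_toy_key = "text_branch.encoder.sequential.3.weight".replace("text_branch", "text_model")
_seq_idx = int(re.match(r".*sequential.(\d+).*", _toy_key).group(1))
_toy_key = _toy_key.replace(f"sequential.{_seq_idx}.", f"layers.{_seq_idx // 3}.linear.")
assert _toy_key == "text_model.encoder.layers.1.linear.weight"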
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 0 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase__ ( UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = '''data2vec-audio'''
def __init__( self ,A=32 ,A=768 ,A=12 ,A=12 ,A=3_072 ,A="gelu" ,A=0.1 ,A=0.1 ,A=0.1 ,A=0.0 ,A=0.1 ,A=0.1 ,A=0.02 ,A=1e-5 ,A="gelu" ,A=(512, 512, 512, 512, 512, 512, 512) ,A=(5, 2, 2, 2, 2, 2, 2) ,A=(10, 3, 3, 3, 3, 2, 2) ,A=False ,A=16 ,A=19 ,A=5 ,A=0.05 ,A=10 ,A=2 ,A=0.0 ,A=10 ,A=0 ,A="sum" ,A=False ,A=False ,A=256 ,A=(512, 512, 512, 512, 1_500) ,A=(5, 3, 3, 1, 1) ,A=(1, 2, 3, 1, 1) ,A=512 ,A=0 ,A=1 ,A=2 ,A=False ,A=3 ,A=2 ,A=3 ,A=None ,**A ,):
super().__init__(**_a ,pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(_a )
UpperCAmelCase = list(_a )
UpperCAmelCase = list(_a )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = conv_pos_kernel_size
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = vocab_size
UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# adapter
UpperCAmelCase = add_adapter
UpperCAmelCase = adapter_kernel_size
UpperCAmelCase = adapter_stride
UpperCAmelCase = num_adapter_layers
UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = list(_a )
UpperCAmelCase = list(_a )
UpperCAmelCase = list(_a )
UpperCAmelCase = xvector_output_dim
@property
def _UpperCamelCase ( self ):
return math.prod(self.conv_stride )
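# Hedged check of the property above: with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the raw waveform by
# 5 * 2**6 = 320 samples per output frame.
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320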
| 710 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
| 74 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
_UpperCamelCase = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
_UpperCamelCase = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = FunnelTokenizer
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = 2
def __init__( self ,A=None ,A=None ,A=True ,A="<unk>" ,A="<sep>" ,A="<pad>" ,A="<cls>" ,A="<mask>" ,A="<s>" ,A="</s>" ,A=True ,A=True ,A=None ,A="##" ,**A ,):
super().__init__(
UpperCamelCase__ ,tokenizer_file=UpperCamelCase__ ,do_lower_case=UpperCamelCase__ ,unk_token=UpperCamelCase__ ,sep_token=UpperCamelCase__ ,pad_token=UpperCamelCase__ ,cls_token=UpperCamelCase__ ,mask_token=UpperCamelCase__ ,bos_token=UpperCamelCase__ ,eos_token=UpperCamelCase__ ,clean_text=UpperCamelCase__ ,tokenize_chinese_chars=UpperCamelCase__ ,strip_accents=UpperCamelCase__ ,wordpieces_prefix=UpperCamelCase__ ,**UpperCamelCase__ ,)
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,UpperCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,UpperCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,UpperCamelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(UpperCamelCase__ ,normalizer_state.pop("""type""" ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**UpperCamelCase__ )
UpperCAmelCase = do_lower_case
def _UpperCamelCase ( self ,A ,A=None ):
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self ,A ,A = None ):
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self ,A ,A = None ):
UpperCAmelCase = self._tokenizer.model.save(UpperCamelCase__ ,name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
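# Hedged sketch of the Funnel-specific convention encoded above: unlike BERT,
# the [CLS] position gets token_type_id 2 (cls_token_type_id) while ordinary
# tokens and [SEP] keep segment ids 0/1. The token ids below are made up.
_cls, _sep, _tokens_a = [101], [102], [5, 6, 7]
_type_ids = len(_cls) * [2] + len(_tokens_a + _sep) * [0]
assert _type_ids == [2, 0, 0, 0, 0]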
| 711 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2):
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2):
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2):
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2):
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2):
        return not self.__eq__(polynomial_2)
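

# A short usage sketch for the Polynomial class above (added for illustration;
# the values are worked out from the methods as reconstructed here):
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # represents 3x^2 + 1
    assert p.evaluate(2) == 13  # 3 * 4 + 1
    print(p.derivative())  # 6x
    print(p.integral(constant=5))  # 1.0x^3 + 1.0x + 5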
| 74 | 0 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta_prelayernorm checkpoint weights to our RoBERTa-PreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 712 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 74 | 0 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """
    Bead sort (gravity sort) for a list of non-negative integers.

    >>> bead_sort([6, 11, 12, 4, 1, 5])
    [1, 4, 5, 6, 11, 12]
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
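    # Added sanity check: each outer pass lets heavier "beads" fall past lighter
    # ones, so an already-sorted list is returned unchanged.
    assert bead_sort([1, 2, 3]) == [1, 2, 3]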
| 713 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Rename the keys of the original GLPN state dict to the Hugging Face layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split the fused key/value projection of each encoder block into separate matrices."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Prepare an image that the model can verify its predictions on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights to our GLPN structure."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak of a unimodal list (strictly increasing, then strictly
    decreasing) in O(log n) time via divide and conquer.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
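    # Added check (reasoned from the reconstruction above): the list rises to 5
    # and then falls, so the middle-triple test homes in on 5.
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5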
| 714 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
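
# Added sanity check (reasoned from the definition, not captured output): in a
# simple path 0-1-2 the only cut vertex is the middle one, so this prints 1.
compute_ap({0: [1], 1: [0, 2], 2: [1]})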
| 74 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit on the Aer simulator and return the histogram of counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 715 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 716 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value by the given number of rotations."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Provides the option to hash a string or the contents of a file, and prints the SHA-256 digest."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
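
# Hedged cross-check (added; mirrors the unit test above): the class should agree
# with hashlib for any byte string, e.g.
#
#   import hashlib
#   assert SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()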
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Count the positive n-digit integers that are also an nth power (Project Euler 63).

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCamelCase = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_UpperCamelCase = """\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"""
_UpperCamelCase = """\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"""
_UpperCamelCase = """\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] ,reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] ,)
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 718 |
"""simple docstring"""
from __future__ import annotations
def all_unique(sequence: list) -> bool:
    """
    Check whether every element of a sequence is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(sequence)) == len(sequence)
if __name__ == "__main__":
import doctest
doctest.testmod()
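    # Added examples: duplicates shrink the set, so the length comparison fails.
    assert all_unique(["a", "b", "c"]) is True
    assert all_unique(["a", "a"]) is False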
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar("""T""")
class lowerCamelCase__ ( Generic[T] ):
def __init__( self ,A ):
UpperCAmelCase = data
UpperCAmelCase = self
UpperCAmelCase = 0
class lowerCamelCase__ ( Generic[T] ):
def __init__( self ):
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = DisjointSetTreeNode(_lowercase )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = self.map[data]
if elem_ref != elem_ref.parent:
UpperCAmelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _UpperCamelCase ( self ,A ,A ):
if nodea.rank > nodea.rank:
UpperCAmelCase = nodea
else:
UpperCAmelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _UpperCamelCase ( self ,A ,A ):
self.link(self.find_set(_lowercase ) ,self.find_set(_lowercase ) )
class lowerCamelCase__ ( Generic[T] ):
def __init__( self ):
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ):
if node not in self.connections:
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ,A ,A ):
self.add_node(_lowercase )
self.add_node(_lowercase )
UpperCAmelCase = weight
UpperCAmelCase = weight
def _UpperCamelCase ( self ):
UpperCAmelCase = []
UpperCAmelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda A : x[2] )
# creating the disjoint set
UpperCAmelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_lowercase )
# MST generation
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCAmelCase = edges[index]
index += 1
UpperCAmelCase = disjoint_set.find_set(_lowercase )
UpperCAmelCase = disjoint_set.find_set(_lowercase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_lowercase ,_lowercase ,_lowercase )
disjoint_set.union(_lowercase ,_lowercase )
return graph
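

# Hedged usage sketch for Kruskal's algorithm above (added; the tiny triangle
# graph is illustrative):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    # The MST keeps the two cheapest edges, (1, 2, w=1) and (2, 3, w=2).
    print(mst.connections)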
| 719 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """
    Trial-division primality test using the 6k +/- 1 optimization.

    >>> is_prime(2)
    True
    >>> is_prime(9)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Return the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below the given ratio (Project Euler 58).
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
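    # Added note: with ratio=0.1 the spiral grows until fewer than 10% of its
    # diagonal values are prime; the accepted Project Euler 58 answer for that
    # threshold is 26241 (uncomment to verify, it takes a while):
    # print(solution())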
| 74 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
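

# Hedged usage sketch (added): the processor simply snaps each image down to a
# multiple of size_divisor and rescales pixel values to [0, 1]:
#
#   import numpy as np
#   processor = GLPNImageProcessor(size_divisor=32)
#   batch = processor(images=np.zeros((70, 70, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 64, 64) since 70 rounds down to 64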
| 720 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
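
# Usage sketch (hedged; assumes CLIP-style components are already instantiated):
#     processor = CLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> BatchEncoding with input_ids, attention_mask and pixel_values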
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) is defined as 0
    return -plogp.sum(dim=-1)
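
# Worked example: a uniform distribution over four outcomes has the maximal
# entropy log(4) ~= 1.3863, while a one-hot distribution has entropy 0:
#     entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))  # tensor(1.3863)
#     entropy(torch.tensor([1.0, 0.0, 0.0, 0.0]))      # tensor(0.)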
def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and, via the gradients of a soft head
    mask, head importance scores as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attentions the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), unlogit=True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below
    ``masking_threshold`` times the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and measure the speed/score impact."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
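
# Example invocation (hedged; the script and data file names are placeholders):
#     python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
#         --output_dir ./pruned --try_masking --masking_threshold 0.9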
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
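
# Layout sketch derived from the methods above: unlike BERT, XLNet appends the
# special tokens at the end of the sequence, e.g.
#     tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#     # -> [10, 11, <sep>, 20, 21, <sep>, <cls>]
#     tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
#     # -> [0, 0, 0, 1, 1, 1, 2]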
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
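
# Usage sketch (mirrors the defaults above):
#     config = BertGenerationConfig(num_hidden_layers=12)
#     assert config.hidden_size == 1024 and config.num_hidden_layers == 12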
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue with a fixed capacity, backed by a doubly linked ring."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
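
# Usage sketch: the capacity is fixed at construction time.
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue(1)
#     queue.enqueue(2)
#     assert queue.dequeue() == 1
#     assert queue.first() == 2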
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set,
        with rank 1 for each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using the union-by-rank heuristic; return True on success."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
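
# Usage sketch: three sets of sizes 1, 2 and 3; merging 0 and 1 joins them
# into a set of size 3.
#     ds = DisjointSet([1, 2, 3])
#     assert ds.merge(0, 1) is True
#     assert ds.max_set == 3
#     assert ds.merge(0, 1) is False  # already in the same set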
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()

    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
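
# Example invocation (hedged; file names are placeholders):
#     python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5-hifigan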
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Dataset of CNN/DailyMail story files: each item yields the file name,
    the article lines and the summary ("@highlight") lines."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story file into article lines and "@highlight" summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]  # incl. close single/double quotes
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate or pad with pad_token_id."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Build an attention mask that is 0 on padding tokens and 1 elsewhere."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize the story and summary lines and flatten them into single sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment ids 0/1 at every separator token, sentence by sentence."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
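
# Worked example for process_story (CNN/DailyMail "@highlight" format):
#     raw = "First sentence.\nSecond sentence\n@highlight\nThe summary"
#     story, summary = process_story(raw)
#     # story   == ["First sentence.", "Second sentence."]  (missing period added)
#     # summary == ["The summary."]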
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    video_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_path)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
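
# Worked example: the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum to
# 28, making it a perfect number:
#     assert sum_of_proper_divisors(28) == 28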
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
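
# Usage sketch: `num_hidden_layers` is derived from the block sizes, not settable.
#     config = FunnelConfig(block_sizes=[2, 2], d_model=256, n_head=4)
#     assert config.num_hidden_layers == 4 and config.num_blocks == 2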
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class lowerCamelCase__ :
def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ):
UpperCAmelCase = input_size
UpperCAmelCase = mask_patch_size
UpperCAmelCase = model_patch_size
UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
UpperCAmelCase = self.input_size // self.mask_patch_size
UpperCAmelCase = self.mask_patch_size // self.model_patch_size
UpperCAmelCase = self.rand_size**2
UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
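# Pick `mask_count` random patch positions, mark them in a flat grid, reshape
# to 2D and upsample by `scale` so the mask aligns with the model's patches.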
UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase = np.zeros(self.token_count ,dtype=A )
UpperCAmelCase = 1
UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
return torch.tensor(mask.flatten() )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] )
UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , _snake_case , _snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F''' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase = split["""train"""]
UpperCAmelCase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_snake_case , """decoder_type""" ):
UpperCAmelCase = """simmim"""
# adapt config: fall back to the pretrained config's values when the CLI arguments are unset
UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case )
if training_args.do_train:
UpperCAmelCase = ds["""train"""].column_names
else:
UpperCAmelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase = """image"""
elif "img" in column_names:
UpperCAmelCase = """img"""
else:
UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_snake_case ):
UpperCAmelCase = [transforms(image ) for image in examples[image_column_name]]
UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , _snake_case )
trainer.save_metrics("""eval""" , _snake_case )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
def _a ( _snake_case = 100 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 706 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
UpperCAmelCase = logging.WARNING
if model_args.verbose_logging:
UpperCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
UpperCAmelCase = logging.INFO
logger.setLevel(_snake_case )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
SCREAMING_SNAKE_CASE = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
SCREAMING_SNAKE_CASE = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self ,A ):
# reformat list to dict and set to pytorch format
UpperCAmelCase = self.feature_extractor.pad(
A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
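# The convolutional feature encoder downsamples the waveform, so compute the
# sequence length of its output for this padded batch.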
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
UpperCAmelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
UpperCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
# these two operations make sure that all values
# before the output length indices are attended to
UpperCAmelCase = 1
UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
UpperCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A ,min_masks=2 ,)
return batch
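# Trainer subclass for wav2vec 2.0-style pretraining: performs the
# forward/backward pass and decays the Gumbel-softmax temperature after
# every update step.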
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ):
super().__init__(*A ,**A )
UpperCAmelCase = 0
UpperCAmelCase = max_gumbel_temp
UpperCAmelCase = min_gumbel_temp
UpperCAmelCase = gumbel_temp_decay
def _UpperCamelCase ( self ,A ,A ):
model.train()
UpperCAmelCase = self._prepare_inputs(A )
if self.use_amp:
with autocast():
UpperCAmelCase = self.compute_loss(A ,A )
else:
UpperCAmelCase = self.compute_loss(A ,A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_snake_case , _snake_case )
# Downloading and loading a dataset from the hub.
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case )
def prepare_dataset(_snake_case ):
# check that all files have the correct sampling rate
UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
UpperCAmelCase = datasets.map(
_snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
UpperCAmelCase = vectorized_datasets.filter(
lambda data : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_snake_case ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
UpperCAmelCase = vectorized_datasets.map(
_snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
UpperCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
UpperCAmelCase = WavaVecaForPreTraining(_snake_case )
UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case )
UpperCAmelCase = WavaVecaPreTrainer(
model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCamelCase__ :
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=False ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=a_ ,initializer_range=self.initializer_range ,use_stable_embedding=a_ ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
UpperCAmelCase = OpenLlamaModel(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ ,attention_mask=a_ )
UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = True
UpperCAmelCase = OpenLlamaModel(a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,)
UpperCAmelCase = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,)
UpperCAmelCase = model(a_ ,attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = OpenLlamaForCausalLM(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = OpenLlamaForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
UpperCAmelCase = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,use_cache=a_ ,)
UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
UpperCAmelCase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to input_ids and the new mask to attention_mask
UpperCAmelCase = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase = torch.cat([input_mask, next_mask] ,dim=-1 )
UpperCAmelCase = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,output_hidden_states=a_ ,)["hidden_states"][0]
UpperCAmelCase = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,past_key_values=a_ ,output_hidden_states=a_ ,)["hidden_states"][0]
# select random slice
UpperCAmelCase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ ,a_ ,atol=1e-3 ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
(
UpperCAmelCase ,
UpperCAmelCase ,
UpperCAmelCase ,
UpperCAmelCase ,
UpperCAmelCase ,
UpperCAmelCase ,
UpperCAmelCase ,
) = config_and_inputs
UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenLlamaModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=a_ ,hidden_size=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = input_dict["input_ids"]
UpperCAmelCase = input_ids.ne(1 ).to(a_ )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ ,attention_mask=a_ ,labels=a_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = "single_label_classification"
UpperCAmelCase = input_dict["input_ids"]
UpperCAmelCase = input_ids.ne(1 ).to(a_ )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ ,attention_mask=a_ ,labels=a_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = "multi_label_classification"
UpperCAmelCase = input_dict["input_ids"]
UpperCAmelCase = input_ids.ne(1 ).to(a_ )
UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase = OpenLlamaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ ,attention_mask=a_ ,labels=a_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _UpperCamelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _UpperCamelCase ( self ,A ):
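# RoPE scaling sanity check: short inputs should be unaffected by dynamic
# scaling but changed by linear scaling; long inputs should differ in both cases.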
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ids_tensor([1, 10] ,config.vocab_size )
UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase = OpenLlamaModel(a_ )
original_model.to(a_ )
original_model.eval()
UpperCAmelCase = original_model(a_ ).last_hidden_state
UpperCAmelCase = original_model(a_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase = OpenLlamaModel(a_ )
scaled_model.to(a_ )
scaled_model.eval()
UpperCAmelCase = scaled_model(a_ ).last_hidden_state
UpperCAmelCase = scaled_model(a_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a_ ,a_ ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(a_ ,a_ ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a_ ,a_ ,atol=1e-5 ) )
| 707 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = 30
UpperCAmelCase = self.seq_length + self.mem_len
UpperCAmelCase = 15
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = [10, 50, 80]
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = 8
UpperCAmelCase = 128
UpperCAmelCase = 2
UpperCAmelCase = 2
UpperCAmelCase = None
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 3
UpperCAmelCase = self.vocab_size - 1
UpperCAmelCase = 0.01
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLLMHeadModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLForSequenceClassification(A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCAmelCase = model.get_output_embeddings()
assert isinstance(A ,tf.keras.layers.Layer )
UpperCAmelCase = model.get_bias()
assert name is None
else:
UpperCAmelCase = model.get_output_embeddings()
assert x is None
UpperCAmelCase = model.get_bias()
assert name is None
def _UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _UpperCamelCase ( self ):
pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
_UpperCamelCase = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__ :
def __init__( self ,A ,A ):
UpperCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase = {}
UpperCAmelCase = source_vertex
def _UpperCamelCase ( self ):
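# Standard breadth-first search from the source vertex, recording each
# visited vertex's parent so shortest paths can be reconstructed later.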
UpperCAmelCase = {self.source_vertex}
UpperCAmelCase = None
UpperCAmelCase = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase = vertex
queue.append(_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ,A ):
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase = self.parent.get(_SCREAMING_SNAKE_CASE )
if target_vertex_parent is None:
UpperCAmelCase = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_SCREAMING_SNAKE_CASE )
return self.shortest_path(_SCREAMING_SNAKE_CASE ) + F'''->{target_vertex}'''
if __name__ == "__main__":
_UpperCamelCase = Graph(graph, """G""")
g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 708 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
import os
def _a ( ):
"""simple docstring"""
UpperCAmelCase = os.path.join(os.path.dirname(A_ ) , """num.txt""" )
with open(A_ ) as file_hand:
return str(sum(int(A_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 709 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
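# The first pattern matches layers that CLAP stores inside an nn.Sequential;
# the second matches the numbered layers of the text/audio projection heads.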
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projection_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projection_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=True , _snake_case="pt" ):
"""simple docstring"""
UpperCAmelCase = {'add_prefix_space': True} if isinstance(_snake_case , _snake_case ) and not line.startswith(""" """ ) else {}
UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_snake_case , padding="""max_length""" if pad_to_max_length else None , truncation=_snake_case , return_tensors=_snake_case , add_special_tokens=_snake_case , **_snake_case , )
def _a ( _snake_case , _snake_case , _snake_case=None , ):
"""simple docstring"""
UpperCAmelCase = input_ids.ne(_snake_case ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase__ ( snake_case__ ):
def __init__( self ,A ,A ,A ,A ,A="train" ,A=None ,A=None ,A=None ,A="" ,):
super().__init__()
UpperCAmelCase = Path(_A ).joinpath(type_path + """.source""" )
UpperCAmelCase = Path(_A ).joinpath(type_path + """.target""" )
UpperCAmelCase = self.get_char_lens(self.src_file )
UpperCAmelCase = max_source_length
UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
UpperCAmelCase = tokenizer
UpperCAmelCase = prefix
if n_obs is not None:
UpperCAmelCase = self.src_lens[:n_obs]
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self ,A ):
UpperCAmelCase = index + 1 # linecache starts at 1
UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) ,_A ).rstrip("""\n""" )
UpperCAmelCase = linecache.getline(str(self.tgt_file ) ,_A ).rstrip("""\n""" )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_A ) else self.tokenizer
)
UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer ,_A ) else self.tokenizer
UpperCAmelCase = encode_line(_A ,_A ,self.max_source_length ,"""right""" )
UpperCAmelCase = encode_line(_A ,_A ,self.max_target_length ,"""right""" )
UpperCAmelCase = source_inputs['input_ids'].squeeze()
UpperCAmelCase = target_inputs['input_ids'].squeeze()
UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCamelCase ( A ):
return [len(_A ) for x in Path(_A ).open().readlines()]
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = torch.stack([x["""input_ids"""] for x in batch] )
UpperCAmelCase = torch.stack([x["""attention_mask"""] for x in batch] )
UpperCAmelCase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = trim_batch(_A ,_A )
UpperCAmelCase = trim_batch(_A ,_A ,attention_mask=_A )
UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_UpperCamelCase = getLogger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
return list(itertools.chain.from_iterable(_snake_case ) )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_git_info()
save_json(_snake_case , os.path.join(_snake_case , """git_log.json""" ) )
def _a ( _snake_case , _snake_case , _snake_case=4 , **_snake_case ):
"""simple docstring"""
with open(_snake_case , """w""" ) as f:
json.dump(_snake_case , _snake_case , indent=_snake_case , **_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
with open(_snake_case ) as f:
return json.load(_snake_case )
def _a ( ):
"""simple docstring"""
UpperCAmelCase = git.Repo(search_parent_directories=_snake_case )
UpperCAmelCase = {
'repo_id': str(_snake_case ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
return list(map(_snake_case , _snake_case ) )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
with open(_snake_case , """wb""" ) as f:
return pickle.dump(_snake_case , _snake_case )
def _a ( _snake_case ):
"""simple docstring"""
def remove_articles(_snake_case ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , _snake_case )
def white_space_fix(_snake_case ):
return " ".join(text.split() )
def remove_punc(_snake_case ):
UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_snake_case ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_snake_case ) ) ) )
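# Token-level F1 between a prediction and a reference, as in SQuAD-style evaluation.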
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = normalize_answer(_snake_case ).split()
UpperCAmelCase = normalize_answer(_snake_case ).split()
UpperCAmelCase = Counter(_snake_case ) & Counter(_snake_case )
UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase = 1.0 * num_same / len(_snake_case )
UpperCAmelCase = 1.0 * num_same / len(_snake_case )
UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
return normalize_answer(_snake_case ) == normalize_answer(_snake_case )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
assert len(_snake_case ) == len(_snake_case )
UpperCAmelCase = 0
for hypo, pred in zip(_snake_case , _snake_case ):
em += exact_match_score(_snake_case , _snake_case )
if len(_snake_case ) > 0:
em /= len(_snake_case )
return {"em": em}
def _a ( _snake_case ):
"""simple docstring"""
return model_prefix.startswith("""rag""" )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_snake_case , _snake_case , _snake_case ):
if not hasattr(_snake_case , _snake_case ) and not hasattr(_snake_case , equivalent_param[p] ):
logger.info("""config doesn\'t have a `{}` attribute""".format(_snake_case ) )
delattr(_snake_case , _snake_case )
continue
UpperCAmelCase = p if hasattr(_snake_case , _snake_case ) else equivalent_param[p]
setattr(_snake_case , _snake_case , getattr(_snake_case , _snake_case ) )
delattr(_snake_case , _snake_case )
return hparams, config
| 710 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
| 74 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
_UpperCamelCase = 6378137.0
_UpperCamelCase = 6356752.314245
_UpperCamelCase = 6378137
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = (AXIS_A - AXIS_B) / AXIS_A
UpperCAmelCase = atan((1 - flattening) * tan(radians(__snake_case ) ) )
UpperCAmelCase = atan((1 - flattening) * tan(radians(__snake_case ) ) )
UpperCAmelCase = radians(__snake_case )
UpperCAmelCase = radians(__snake_case )
# Haversine: d = 2R * asin( sqrt( sin^2((phi_2 - phi_1)/2) + cos(phi_1) * cos(phi_2) * sin^2((lambda_2 - lambda_1)/2) ) )
UpperCAmelCase = sin((phi_a - phi_a) / 2 )
UpperCAmelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
UpperCAmelCase = sqrt(sin_sq_phi + (cos(__snake_case ) * cos(__snake_case ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
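# Dense polynomial over the reals: coefficients[i] is the coefficient of x**i,
# e.g. Polynomial(2 ,[0, 0, 1]) represents x**2.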
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        # Add the coefficients of the smaller polynomial into a copy of the larger one
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a) -> bool:
        return not self.__eq__(polynomial_a)
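# A small usage sketch (not part of the original module) exercising the class above.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients ordered from x^0 upward: p(x) = 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])     # q(x) = x
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 17
    print(p.derivative())  # 6x + 2
    print(p * q)           # 3x^3 + 2x^2 + 1x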
| 74 | 0 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = 0
@slow
def _UpperCamelCase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCamelCase_ ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCamelCase_ ) ,0 )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,config=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" ,os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,tokenizer_type="""bert""" ,use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" ,os.path.join(lowerCamelCase_ ,"""vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" ,os.path.join(lowerCamelCase_ ,"""merges.txt""" ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,tokenizer_type="""gpt2""" ,use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
@require_tokenizers
def _UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" ,os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,tokenizer_type="""bert""" )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" ,os.path.join(lowerCamelCase_ ,"""vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" ,os.path.join(lowerCamelCase_ ,"""merges.txt""" ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,tokenizer_type="""gpt2""" )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def _UpperCamelCase ( self ):
with pytest.raises(lowerCamelCase_ ):
AutoTokenizer.from_pretrained("""./""" ,tokenizer_type="""xxx""" )
@require_tokenizers
def _UpperCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowerCamelCase_ ,(BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,lowerCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case ,lowerCamelCase_ )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def _UpperCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCamelCase_ ,"""julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" ,):
UpperCAmelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _UpperCamelCase ( self ):
UpperCAmelCase = TOKENIZER_MAPPING.values()
UpperCAmelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCamelCase_ )
@require_tokenizers
def _UpperCamelCase ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ,use_fast=lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) ,lowerCamelCase_ )
@require_tokenizers
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" ,do_lower_case=lowerCamelCase_ )
UpperCAmelCase = """Hello, world. How are you?"""
UpperCAmelCase = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual("""[UNK]""" ,tokens[0] )
UpperCAmelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" ,do_lower_case=lowerCamelCase_ )
UpperCAmelCase = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual("""[UNK]""" ,tokens[0] )
@require_tokenizers
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,"""[UNK]""" )
self.assertEqual(tokenizer.padding_side ,"""right""" )
self.assertEqual(tokenizer.truncation_side ,"""right""" )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
            tokenizera = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(tokenizera ,tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size ,12 )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_tokenizer_config("""bert-base-cased""" )
UpperCAmelCase = config.pop("""_commit_hash""" ,lowerCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCamelCase_ ,{"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase = get_tokenizer_config(lowerCamelCase_ )
self.assertDictEqual(lowerCamelCase_ ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = get_tokenizer_config(lowerCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] ,"""BertTokenizer""" )
def _UpperCamelCase ( self ):
try:
AutoConfig.register("""custom""" ,lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ ,slow_tokenizer_class=lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoTokenizer.register(lowerCamelCase_ ,slow_tokenizer_class=lowerCamelCase_ )
UpperCAmelCase = CustomTokenizer.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _UpperCamelCase ( self ):
try:
AutoConfig.register("""custom""" ,lowerCamelCase_ )
# Can register in two steps
AutoTokenizer.register(lowerCamelCase_ ,slow_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(lowerCamelCase_ ,fast_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCamelCase_ ,slow_tokenizer_class=lowerCamelCase_ ,fast_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoTokenizer.register(lowerCamelCase_ ,fast_tokenizer_class=lowerCamelCase_ )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase ( self ):
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
# Test we can also load the slow version
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"""NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"""NewTokenizer""" )
@require_tokenizers
def _UpperCamelCase ( self ):
class lowerCamelCase__ ( lowercase_ ):
SCREAMING_SNAKE_CASE = False
class lowerCamelCase__ ( lowercase_ ):
SCREAMING_SNAKE_CASE = NewTokenizer
SCREAMING_SNAKE_CASE = False
try:
AutoConfig.register("""custom""" ,lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ ,slow_tokenizer_class=lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ ,fast_tokenizer_class=lowerCamelCase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ,use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" ,trust_remote_code=lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" ,trust_remote_code=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
# Test we can also load the slow version
UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" ,trust_remote_code=lowerCamelCase_ ,use_fast=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(
lowerCamelCase_ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(
lowerCamelCase_ ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ,revision="""aaaaaa""" )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
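# Illustrative round-trip of the pattern the tests above exercise (the checkpoint
# name and path are examples, not from this file):
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     tokenizer.save_pretrained("/tmp/tok")
#     reloaded = AutoTokenizer.from_pretrained("/tmp/tok")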
| 712 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__( self ,parent ,):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] ,2 ,dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertModel(config=config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids ,1 ) ,(1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask ,1 ) ,(1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids ,1 ) ,(1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=FlaubertConfig ,emb_dim=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_flaubert_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self ):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.int32 ,)  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
            ] ,dtype=tf.float32 ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="""[UNK]""" ,sep_token="""[SEP]""" ,pad_token="""[PAD]""" ,cls_token="""[CLS]""" ,mask_token="""[MASK]""" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
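# Illustrative behaviour of the two sequence helpers above (token ids are made up):
#     tok.build_inputs_with_special_tokens([5, 6], [7])
#         -> [cls_id, 5, 6, sep_id, 7, sep_id]
#     tok.create_token_type_ids_from_sequences([5, 6], [7])
#         -> [0, 0, 0, 0, 1, 1]   # zeros cover [CLS] A [SEP], ones cover B [SEP]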
| 713 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def rename_keys ( state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v ( state_dict , config ):
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img ():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
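# Illustrative invocation (the script filename and paths are examples, not from
# the original file):
#     python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti --push_to_hub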
| 74 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer(self ,**kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint ,**kwargs )
    def get_feature_extractor(self ,**kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features(self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False ,padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=False ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,ClapFeatureExtractor )
    def test_feature_extractor(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1_000) )
        input_feat_extract = feature_extractor(raw_speech ,return_tensors="""np""" )
        input_processor = processor(audios=raw_speech ,return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
    def test_tokenizer(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def test_tokenizer_decode(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
    def test_model_input_names(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
| 714 |
"""simple docstring"""
def compute_ap ( l ):  # noqa: E741
    """simple docstring"""
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
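# For the adjacency list above, the articulation points printed should be
# 2, 3 and 5: removing 2 cuts off {0, 1}, removing 3 isolates 4, and
# removing 5 disconnects the 6-7-8 cycle from the rest of the graph.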
| 74 | 0 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    # Used in the return key of the pipeline.
    return_name = """generated"""
    def __init__( self ,*args ,**kwargs ):
        super().__init__(*args ,**kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self ,return_tensors=None ,return_type=None ,clean_up_tokenization_spaces=None ,truncation=None ,stop_sequence=None ,**generate_kwargs ,):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence ,add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def check_inputs( self ,input_length ,min_length ,max_length ):
        return True
    def _parse_and_tokenize( self ,*args ,truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] ,list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] ,str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
        inputs = self.tokenizer(*args ,padding=padding ,truncation=truncation ,return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self ,*args ,**kwargs ):
        result = super().__call__(*args ,**kwargs )
        if (
            isinstance(args[0] ,list )
            and all(isinstance(el ,str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self ,inputs ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,**kwargs ):
        model_inputs = self._parse_and_tokenize(inputs ,truncation=truncation ,**kwargs )
        return model_inputs
    def _forward( self ,model_inputs ,**generate_kwargs ):
        if self.framework == "pt":
            in_b, input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" ,self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
        self.check_inputs(input_length ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs ,**generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b ,out_b // in_b ,*output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self ,model_outputs ,return_type=ReturnType.TEXT ,clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {F'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    F'''{self.return_name}_text''': self.tokenizer.decode(
                        output_ids ,skip_special_tokens=True ,clean_up_tokenization_spaces=clean_up_tokenization_spaces ,)
                }
            records.append(record )
        return records
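# Illustrative usage of the pipeline defined above (the model name is an
# example, not from this file):
#     from transformers import pipeline
#     generator = pipeline("text2text-generation", model="t5-small")
#     generator("translate English to French: Hello")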
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = """summary"""
    def __call__( self ,*args ,**kwargs ):
        return super().__call__(*args ,**kwargs )
    def check_inputs( self ,input_length ,min_length ,max_length ):
        if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
        if input_length < max_length:
            logger.warning(
                F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                F'''consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = """translation"""
    def check_inputs( self ,input_length ,min_length ,max_length ):
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
    def preprocess( self ,*args ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,src_lang=None ,tgt_lang=None ):
        if getattr(self.tokenizer ,"""_build_translation_inputs""" ,None ):
            return self.tokenizer._build_translation_inputs(
                *args ,return_tensors=self.framework ,truncation=truncation ,src_lang=src_lang ,tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args ,truncation=truncation )
    def _sanitize_parameters( self ,src_lang=None ,tgt_lang=None ,**kwargs ):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" ,self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self ,*args ,**kwargs ):
        return super().__call__(*args ,**kwargs )
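# The backward-compatibility branch in _sanitize_parameters parses task strings
# of the form "translation_XX_to_YY"; e.g. "translation_en_to_fr" splits into
# ["translation", "en", "to", "fr"], giving src_lang="en" and tgt_lang="fr":
#     translator = pipeline("translation_en_to_fr", model="t5-small")  # example model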
| 715 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 0 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers ( max_number ):
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( max_number = 10**8 ):
    """simple docstring"""
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__( self ,data ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
        # Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing( data ):
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" ,(len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ):
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase ):
    def test_match_hashes( self ):
        import hashlib
        data = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHA256(data ).hash ,hashlib.sha256(data ).hexdigest() )
def _a ( ):
"""simple docstring"""
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , """utf-8""" )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _a ( _snake_case ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
_UpperCamelCase = parser.parse_args()
if args.check_lib:
_UpperCamelCase = importlib.import_module("""transformers""")
_UpperCamelCase = Path(transformers_module.__file__).parent
else:
_UpperCamelCase = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 717 |
"""simple docstring"""
def _a ( _snake_case = 10 , _snake_case = 22 ):
"""simple docstring"""
UpperCAmelCase = range(1 , _snake_case )
UpperCAmelCase = range(1 , _snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ): # This function is recursive
"""simple docstring"""
UpperCAmelCase = len(_snake_case )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
UpperCAmelCase = array[0]
UpperCAmelCase = False
UpperCAmelCase = 1
UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
UpperCAmelCase = True
UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
UpperCAmelCase = longest_subsequence(_snake_case )
if len(_snake_case ) > len(_snake_case ):
UpperCAmelCase = temp_array
else:
i += 1
UpperCAmelCase = [element for element in array[1:] if element >= pivot]
UpperCAmelCase = [pivot, *longest_subsequence(_snake_case )]
if len(_snake_case ) > len(_snake_case ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
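# A non-recursive O(n^2) version of the same "longest non-decreasing
# subsequence" problem, for contrast with the divide-and-recurse approach
# above. The function name is illustrative.
def longest_subsequence_dp(array: list) -> list:
    best = [[x] for x in array]  # best[i]: longest subsequence ending at i
    for i in range(len(array)):
        for j in range(i):
            if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [array[i]]
    return max(best, key=len, default=[])


assert longest_subsequence_dp([10, 22, 9, 33, 21, 50, 41, 60]) == [10, 22, 33, 50, 60]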
| 718 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
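# The set-based uniqueness test above in action: a duplicate shrinks the set,
# so the two lengths diverge.
assert len(set([1, 2, 3])) == len([1, 2, 3])  # all unique
assert len(set([1, 2, 2])) != len([1, 2, 2])  # duplicate detected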
| 74 | 0 |
"""simple docstring"""
from math import factorial
_UpperCamelCase = {str(digit): factorial(digit) for digit in range(10)}
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(__a , __a ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Convert the number to a string to iterate over its digits, summing their factorials.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__a ) )
def _a ( _snake_case = 60 , _snake_case = 100_0000 ):
"""simple docstring"""
if not isinstance(__a , __a ) or not isinstance(__a , __a ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
UpperCAmelCase = 0
# the cached sizes of the previous chains
UpperCAmelCase = {}
for start_chain_element in range(1 , __a ):
# The temporary set will contain the elements of the chain
UpperCAmelCase = set()
UpperCAmelCase = 0
# Stop computing the chain when you find a cached size, a repeating item,
# or the length is greater than the desired one.
UpperCAmelCase = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__a )
chain_set_length += 1
UpperCAmelCase = digit_factorial_sum(__a )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
UpperCAmelCase = chain_set_length
# If the chain contains exactly the desired number of elements, increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 719 |
"""simple docstring"""
import math
def _a ( _snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( _snake_case = 0.1 ):
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
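# For orientation: the values fed to `is_prime` above are the corners of an
# Ulam square spiral. For side length j the four corners are j*j - k*(j-1)
# for k in 0..3, so each new ring contributes three non-square candidates.
for j in (3, 5, 7):
    corners = sorted(j * j - k * (j - 1) for k in range(4))
    print(f"side {j}: {corners}")
# side 3: [3, 5, 7, 9]; side 5: [13, 17, 21, 25]; side 7: [31, 37, 43, 49]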
| 74 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCamelCase = 250004
_UpperCamelCase = 250020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MBartaaTokenizer
SCREAMING_SNAKE_CASE = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartaaTokenizer(snake_case_ ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ):
UpperCAmelCase = "<s>"
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) ,snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) ,snake_case_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(snake_case_ ) ,1_054 )
def _UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_054 )
def _UpperCamelCase ( self ):
UpperCAmelCase = MBartaaTokenizer(snake_case_ ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=snake_case_ )
UpperCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case_ ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,)
UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(
snake_case_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(
snake_case_ ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,)
@slow
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ ,model_name="""facebook/mbart-large-50""" ,revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" ,)
def _UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ ,**snake_case_ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ ,**snake_case_ )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case_ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case_ )
# Check it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(snake_case_ ,snake_case_ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case_ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case_ ,legacy_format=snake_case_ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case_ )
# Check it saves the same files
self.assertSequenceEqual(snake_case_ ,snake_case_ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case_ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_ ) )
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case_ ,legacy_format=snake_case_ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case_ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_ ) )
shutil.rmtree(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = """facebook/mbart-large-50-one-to-many-mmt"""
SCREAMING_SNAKE_CASE = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
SCREAMING_SNAKE_CASE = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _UpperCamelCase ( cls ):
UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" )
UpperCAmelCase = 1
return cls
def _UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,250_038 )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,snake_case_ )
def _UpperCamelCase ( self ):
self.assertIn(snake_case_ ,self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase = self.tokenizer.decode(snake_case_ ,skip_special_tokens=snake_case_ )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ ,snake_case_ )
self.assertNotIn(self.tokenizer.eos_token ,snake_case_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] ,snake_case_ )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(snake_case_ ,max_length=snake_case_ ,truncation=snake_case_ ).input_ids[0]
self.assertEqual(ids[0] ,snake_case_ )
self.assertEqual(ids[-1] ,2 )
self.assertEqual(len(snake_case_ ) ,snake_case_ )
def _UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250_053, 250_001] )
def _UpperCamelCase ( self ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case_ )
UpperCAmelCase = MBartaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case_ )
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case_ ,return_tensors="""pt""" )
UpperCAmelCase = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=snake_case_ ,truncation=snake_case_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
UpperCAmelCase = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case_ ,snake_case_ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,snake_case_ )
self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer(self.src_text ,padding=snake_case_ ,truncation=snake_case_ ,max_length=3 ,return_tensors="""pt""" )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text ,padding=snake_case_ ,truncation=snake_case_ ,max_length=10 ,return_tensors="""pt""" )
UpperCAmelCase = targets["input_ids"]
UpperCAmelCase = shift_tokens_right(snake_case_ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(snake_case_ ) ,{
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} ,)
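# A pure-Python paraphrase of the `shift_tokens_right` behaviour the batch
# tests above rely on: the last non-pad token (EOS in MBart targets) moves to
# the front and everything else shifts one position to the right. For
# intuition only; the real function operates on torch tensors.
def shift_right(labels: list, pad_id: int) -> list:
    last_non_pad = max(i for i, tok in enumerate(labels) if tok != pad_id)
    return [labels[last_non_pad]] + labels[:-1]


assert shift_right([250020, 884, 9019, 2], pad_id=1) == [2, 250020, 884, 9019]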
| 720 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE = '''CLIPImageProcessor'''
SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self ,A=None ,A=None ,**A ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,A ,)
UpperCAmelCase = kwargs.pop("""feature_extractor""" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A ,A )
def __call__( self ,A=None ,A=None ,A=None ,**A ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A )
if images is not None:
UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.batch_decode(*A ,**A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.decode(*A ,**A )
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,)
return self.image_processor_class
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,)
return self.image_processor
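# The merging step in __call__ above, restated with plain dicts standing in
# for BatchEncoding: when both text and images are passed, the image
# processor's pixel_values are grafted onto the tokenizer's encoding.
encoding = {"input_ids": [[101, 102]], "attention_mask": [[1, 1]]}
image_features = {"pixel_values": [[0.0, 0.5, 1.0]]}
encoding["pixel_values"] = image_features["pixel_values"]
assert sorted(encoding) == ["attention_mask", "input_ids", "pixel_values"]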
| 74 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = '''laion/clap-htsat-unfused'''
UpperCAmelCase = tempfile.mkdtemp()
def _UpperCamelCase ( self ,**A ):
return RobertaTokenizer.from_pretrained(self.checkpoint ,**UpperCAmelCase__ )
def _UpperCamelCase ( self ,**A ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**UpperCAmelCase__ )
def _UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = ClapProcessor(tokenizer=UpperCAmelCase__ ,feature_extractor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
UpperCAmelCase = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase = self.get_feature_extractor(do_normalize=UpperCAmelCase__ ,padding_value=1.0 )
UpperCAmelCase = ClapProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase__ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = ClapProcessor(tokenizer=UpperCAmelCase__ ,feature_extractor=UpperCAmelCase__ )
UpperCAmelCase = floats_list((3, 1_000) )
UpperCAmelCase = feature_extractor(UpperCAmelCase__ ,return_tensors="""np""" )
UpperCAmelCase = processor(audios=UpperCAmelCase__ ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = ClapProcessor(tokenizer=UpperCAmelCase__ ,feature_extractor=UpperCAmelCase__ )
UpperCAmelCase = '''This is a test string'''
UpperCAmelCase = processor(text=UpperCAmelCase__ )
UpperCAmelCase = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = ClapProcessor(tokenizer=UpperCAmelCase__ ,feature_extractor=UpperCAmelCase__ )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.batch_decode(UpperCAmelCase__ )
UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = ClapProcessor(tokenizer=UpperCAmelCase__ ,feature_extractor=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
| 721 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _a ( _snake_case = "mumbai" ):
"""simple docstring"""
UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# Each matching div holds the details of one job listing
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 74 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
] ,dtype=tf.floataa ,)
UpperCAmelCase = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,dtype=tf.intaa ,) # expected non-filtered indices as noted above
UpperCAmelCase = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] ,dtype=tf.floataa ,) # expected non-filtered values as noted above
UpperCAmelCase = tf_top_k_top_p_filtering(_lowerCamelCase ,top_k=10 ,top_p=0.6 ,min_tokens_to_keep=4 )
UpperCAmelCase = output[output != -float("""inf""" )]
UpperCAmelCase = tf.cast(
tf.where(tf.not_equal(_lowerCamelCase ,tf.constant(-float("""inf""" ) ,dtype=tf.floataa ) ) ) ,dtype=tf.intaa ,)
tf.debugging.assert_near(_lowerCamelCase ,_lowerCamelCase ,rtol=1e-1_2 )
tf.debugging.assert_equal(_lowerCamelCase ,_lowerCamelCase )
@require_tf
class lowerCamelCase__ ( unittest.TestCase , __UpperCAmelCase ):
if is_tf_available():
SCREAMING_SNAKE_CASE = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = 2
UpperCAmelCase = 2
class lowerCamelCase__ ( tf.Module ):
def __init__( self ,A ):
super(_lowerCamelCase ,self ).__init__()
UpperCAmelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) ,tf.intaa ,name="""input_ids""" ),
tf.TensorSpec((None, input_length) ,tf.intaa ,name="""attention_mask""" ),
) ,jit_compile=_lowerCamelCase ,)
def _UpperCamelCase ( self ,A ,A ):
UpperCAmelCase = self.model.generate(
input_ids=_lowerCamelCase ,attention_mask=_lowerCamelCase ,max_new_tokens=_lowerCamelCase ,return_dict_in_generate=_lowerCamelCase ,)
return {"sequences": outputs["sequences"]}
UpperCAmelCase = [[2, 0], [102, 103]]
UpperCAmelCase = [[1, 0], [1, 1]]
UpperCAmelCase = DummyModel(model=_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCamelCase ,_lowerCamelCase ,signatures={"""serving_default""": dummy_model.serving} )
UpperCAmelCase = tf.saved_model.load(_lowerCamelCase ).signatures["""serving_default"""]
for batch_size in range(1 ,len(_lowerCamelCase ) + 1 ):
UpperCAmelCase = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase = serving_func(**_lowerCamelCase )["""sequences"""]
UpperCAmelCase = test_model.generate(**_lowerCamelCase ,max_new_tokens=_lowerCamelCase )
tf.debugging.assert_equal(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = 1
UpperCAmelCase = 2
class lowerCamelCase__ ( tf.Module ):
def __init__( self ,A ):
super(_lowerCamelCase ,self ).__init__()
UpperCAmelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) ,tf.intaa ,name="""input_ids""" ),
tf.TensorSpec((batch_size, None) ,tf.intaa ,name="""attention_mask""" ),
) ,jit_compile=_lowerCamelCase ,)
def _UpperCamelCase ( self ,A ,A ):
UpperCAmelCase = self.model.generate(
input_ids=_lowerCamelCase ,attention_mask=_lowerCamelCase ,max_new_tokens=_lowerCamelCase ,return_dict_in_generate=_lowerCamelCase ,)
return {"sequences": outputs["sequences"]}
UpperCAmelCase = [[2], [102, 103]]
UpperCAmelCase = [[1], [1, 1]]
UpperCAmelCase = DummyModel(model=_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCamelCase ,_lowerCamelCase ,signatures={"""serving_default""": dummy_model.serving} )
UpperCAmelCase = tf.saved_model.load(_lowerCamelCase ).signatures["""serving_default"""]
for input_row in range(len(_lowerCamelCase ) ):
UpperCAmelCase = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase = serving_func(**_lowerCamelCase )["""sequences"""]
UpperCAmelCase = test_model.generate(**_lowerCamelCase ,max_new_tokens=_lowerCamelCase )
tf.debugging.assert_equal(_lowerCamelCase ,_lowerCamelCase )
@slow
@require_tensorflow_text
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" ,filename="""spiece.model""" ,local_dir=_lowerCamelCase )
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self ):
super().__init__()
UpperCAmelCase = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowerCamelCase ,"""spiece.model""" ) ,"""rb""" ).read() )
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def _UpperCamelCase ( self ,A ,*A ,**A ):
UpperCAmelCase = self.tokenizer.tokenize(_lowerCamelCase )
UpperCAmelCase , UpperCAmelCase = text.pad_model_inputs(
_lowerCamelCase ,max_seq_length=64 ,pad_value=self.model.config.pad_token_id )
UpperCAmelCase = self.model.generate(input_ids=_lowerCamelCase ,attention_mask=_lowerCamelCase )
return self.tokenizer.detokenize(_lowerCamelCase )
UpperCAmelCase = CompleteSentenceTransformer()
UpperCAmelCase = tf.keras.layers.Input(shape=(1,) ,dtype=tf.string ,name="""inputs""" )
UpperCAmelCase = complete_model(_lowerCamelCase )
UpperCAmelCase = tf.keras.Model(_lowerCamelCase ,_lowerCamelCase )
keras_model.save(_lowerCamelCase )
def _UpperCamelCase ( self ):
# Has PT equivalent: this test relies on random sampling
UpperCAmelCase = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
UpperCAmelCase = 14
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = """Hello, my dog is cute and"""
UpperCAmelCase = tokenizer(_lowerCamelCase ,return_tensors="""tf""" )
UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
UpperCAmelCase = model.generate(**_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
UpperCAmelCase = model.generate(**_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _UpperCamelCase ( self ):
# Has PT equivalent: ample use of framework-specific code
UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase = """Hugging Face is a technology company based in New York and Paris."""
UpperCAmelCase = bart_tokenizer(_lowerCamelCase ,return_tensors="""tf""" ).input_ids
UpperCAmelCase = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase = bart_model.generate(_lowerCamelCase ).numpy()
class lowerCamelCase__ ( __UpperCAmelCase ):
def _UpperCamelCase ( self ,A ,A=None ,**A ):
return super().call(_lowerCamelCase ,**_lowerCamelCase )
UpperCAmelCase = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase = bart_model.generate(_lowerCamelCase ,foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_lowerCamelCase ,_lowerCamelCase ) )
class lowerCamelCase__ ( bart_model.model.encoder.__class__ ):
def _UpperCamelCase ( self ,A ,**A ):
return super().call(_lowerCamelCase ,**_lowerCamelCase )
UpperCAmelCase = FakeEncoder(bart_model.config ,bart_model.model.shared )
UpperCAmelCase = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase = bart_model.generate(_lowerCamelCase ).numpy()
with self.assertRaises(_lowerCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_lowerCamelCase ,foo="""bar""" )
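# The idea behind the filtering test at the top of this file, sketched in
# NumPy: keep the k largest logits, then mask the tail of the sorted
# distribution once top_p probability mass is covered. Simplified on purpose
# (no batching, no min_tokens_to_keep), so it only approximates the TF helper.
import numpy as np


def top_k_top_p(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.astype(float)
    out[out < np.sort(out)[-top_k]] = -np.inf  # top-k cut
    order = np.argsort(out)[::-1]              # indices by descending logit
    probs = np.exp(out[order] - out[order][0])
    probs /= probs.sum()
    cutoff = int(np.searchsorted(np.cumsum(probs), top_p)) + 1
    out[order[cutoff:]] = -np.inf              # top-p cut
    return out


print(top_k_top_p(np.array([1.0, 3.0, 2.0, 0.5]), top_k=3, top_p=0.9))
# -> [-inf, 3., 2., -inf]: only the two most likely tokens survive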
| 700 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A )
self.assertEqual(A ,["""c"""] )
self.assertEqual(A ,[2] )
# Out indices set to match out features
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features set to match out indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features selected from negative indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A )
# Out features must be a list
with self.assertRaises(A ):
verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(A ):
verify_out_features_out_indices(A ,0 ,["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(A ,(0, 1) ,["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(A ):
verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ["""a""", """b""", """c"""]
UpperCAmelCase = ["""a""", """c"""]
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ["""a""", """b"""]
self.assertEqual(backbone.out_features ,["""a""", """b"""] )
self.assertEqual(backbone.out_indices ,[0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
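# The alignment rule the tests above encode, in one line: out_features and
# out_indices must name the same stages, in stage order.
stage_names = ["a", "b", "c"]
out_indices = [0, 2]
assert [stage_names[i] for i in out_indices] == ["a", "c"]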
| 74 | 0 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
UpperCAmelCase = len(lowerCamelCase_ ) if (len(lowerCamelCase_ ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(lowerCamelCase_ ) , """Postfix""".center(lowerCamelCase_ ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(lowerCamelCase_ ) # if x is a letter or digit, append it to the postfix output
elif x == "(":
stack.append(lowerCamelCase_ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(lowerCamelCase_ ) == 0:
stack.append(lowerCamelCase_ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(lowerCamelCase_ ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: # stop at "(", which has no priority entry
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(lowerCamelCase_ ) # push x to stack
print(
x.center(8 ) , ("""""".join(lowerCamelCase_ )).ljust(lowerCamelCase_ ) , ("""""".join(lowerCamelCase_ )).ljust(lowerCamelCase_ ) , sep=""" | """ , ) # Output in tabular format
while len(lowerCamelCase_ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(lowerCamelCase_ )).ljust(lowerCamelCase_ ) , ("""""".join(lowerCamelCase_ )).ljust(lowerCamelCase_ ) , sep=""" | """ , ) # Output in tabular format
return "".join(lowerCamelCase_ ) # return Postfix as str
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = list(infix[::-1] ) # reverse the infix equation
for i in range(len(lowerCamelCase_ ) ):
if infix[i] == "(":
UpperCAmelCase = ''')''' # change "(" to ")"
elif infix[i] == ")":
UpperCAmelCase = '''(''' # change ")" to "("
return (infix_2_postfix("""""".join(lowerCamelCase_ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_UpperCamelCase = input("""\nEnter an Infix Equation = """) # Input an Infix equation
_UpperCamelCase = ''.join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 701 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
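# A compact, array-backed sketch of the same circular-queue idea as the
# linked-list version above: fixed capacity and a front index that wraps
# around with modular arithmetic. The class name is illustrative.
class RingBuffer:
    def __init__(self, capacity: int = 6) -> None:
        self.data = [None] * capacity
        self.front = self.size = 0

    def enqueue(self, item) -> None:
        if self.size == len(self.data):
            raise Exception("Full Queue")
        self.data[(self.front + self.size) % len(self.data)] = item
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise Exception("Empty Queue")
        item, self.data[self.front] = self.data[self.front], None
        self.front = (self.front + 1) % len(self.data)
        self.size -= 1
        return item


q = RingBuffer(3)
for x in (1, 2, 3):
    q.enqueue(x)
assert q.dequeue() == 1
q.enqueue(4)  # wraps around into the freed slot
assert [q.dequeue() for _ in range(3)] == [2, 3, 4]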
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
UpperCAmelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(_snake_case )
UpperCAmelCase = torch.load(_snake_case )
load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
UpperCAmelCase = np.load(_snake_case )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
model.save_pretrained(_snake_case )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
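# The (weight_g, weight_v) key pairs copied above come from weight
# normalization, where the effective weight is g * v / ||v||. A one-row NumPy
# illustration for intuition only; real layers normalize per output channel.
import numpy as np

v = np.array([3.0, 4.0])  # direction
g = 2.0                   # magnitude
print(g * v / np.linalg.norm(v))  # [1.2 1.6]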
| 74 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCamelCase__ ( snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = '''nat'''
SCREAMING_SNAKE_CASE = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self ,A=4 ,A=3 ,A=64 ,A=[3, 4, 6, 5] ,A=[2, 4, 8, 16] ,A=7 ,A=3.0 ,A=True ,A=0.0 ,A=0.0 ,A=0.1 ,A="gelu" ,A=0.02 ,A=1e-5 ,A=0.0 ,A=None ,A=None ,**A ,):
super().__init__(**lowercase_ )
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = num_heads
UpperCAmelCase = kernel_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
UpperCAmelCase = layer_scale_init_value
UpperCAmelCase = ["stem"] + [F'''stage{idx}''' for idx in range(1 ,len(lowercase_ ) + 1 )]
UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowercase_ ,out_indices=lowercase_ ,stage_names=self.stage_names )
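# The channel rule encoded above, stated numerically: each stage doubles the
# width, so with embed_dim=64 and four stages the final hidden size is 64 * 2**3.
assert 64 * 2 ** (4 - 1) == 512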
| 703 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_UpperCamelCase = concatenate_datasets
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadManager
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 74 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = tokenizer(example["""content"""] , truncation=_snake_case )["""input_ids"""]
UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
_UpperCamelCase = HfArgumentParser(PretokenizationArguments)
_UpperCamelCase = parser.parse_args()
if args.num_workers is None:
_UpperCamelCase = multiprocessing.cpu_count()
_UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_UpperCamelCase = time.time()
_UpperCamelCase = load_dataset(args.dataset_name, split="""train""")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
_UpperCamelCase = time.time()
_UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
_UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 704 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
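# A classic check for the proper-divisor sum above: 28 is a perfect number,
# so its proper divisors 1, 2, 4, 7, 14 sum back to 28.
assert sum(d for d in range(1, 28 // 2 + 1) if 28 % d == 0) == 28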
| 74 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_UpperCamelCase = 'pytorch_model.bin'
_UpperCamelCase = 'pytorch_model.bin.index.json'
_UpperCamelCase = 'adapter_config.json'
_UpperCamelCase = 'adapter_model.bin'
_UpperCamelCase = 'adapter_model.safetensors'
_UpperCamelCase = 'tf_model.h5'
_UpperCamelCase = 'tf_model.h5.index.json'
_UpperCamelCase = 'model.ckpt'
_UpperCamelCase = 'flax_model.msgpack'
_UpperCamelCase = 'flax_model.msgpack.index.json'
_UpperCamelCase = 'model.safetensors'
_UpperCamelCase = 'model.safetensors.index.json'
_UpperCamelCase = 'config.json'
_UpperCamelCase = 'preprocessor_config.json'
_UpperCamelCase = FEATURE_EXTRACTOR_NAME
_UpperCamelCase = 'generation_config.json'
_UpperCamelCase = 'modelcard.json'
_UpperCamelCase = '▁'
_UpperCamelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_UpperCamelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_UpperCamelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_UpperCamelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( _snake_case ):
"""simple docstring"""
if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
UpperCAmelCase = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
UpperCAmelCase = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 705 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
_UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _UpperCamelCase ( self ):
UpperCAmelCase = {}
if self.train_dir is not None:
UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase = self.validation_dir
UpperCAmelCase = data_files if data_files else None
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class lowerCamelCase__ :
def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ):
UpperCAmelCase = input_size
UpperCAmelCase = mask_patch_size
UpperCAmelCase = model_patch_size
UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
UpperCAmelCase = self.input_size // self.mask_patch_size
UpperCAmelCase = self.mask_patch_size // self.model_patch_size
UpperCAmelCase = self.rand_size**2
UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
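# Randomly choose mask_count coarse patches, upsample the boolean grid by
# `scale` to model-patch resolution, then flatten it for the model.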
UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase = np.zeros(self.token_count ,dtype=A )
UpperCAmelCase = 1
UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
return torch.tensor(mask.flatten() )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] )
UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , _snake_case , _snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase = split["""train"""]
UpperCAmelCase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_snake_case , """decoder_type""" ):
UpperCAmelCase = """simmim"""
# adapt config
UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case )
if training_args.do_train:
UpperCAmelCase = ds["""train"""].column_names
else:
UpperCAmelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase = """image"""
elif "img" in column_names:
UpperCAmelCase = """img"""
else:
UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase = Compose(
[
Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_snake_case ):
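# Apply the SimMIM augmentation pipeline to every image and draw an
# independent random patch mask per example.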
UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]]
UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , _snake_case )
trainer.save_metrics("""eval""" , _snake_case )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_UpperCamelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = "cpu"
_UpperCamelCase = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_UpperCamelCase = "path-to-your-trained-model"
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCamelCase = pipe.to(device)
# to channels last
_UpperCamelCase = pipe.unet.to(memory_format=torch.channels_last)
_UpperCamelCase = pipe.vae.to(memory_format=torch.channels_last)
_UpperCamelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_UpperCamelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_UpperCamelCase = torch.randn(2, 4, 64, 64)
_UpperCamelCase = torch.rand(1) * 999
_UpperCamelCase = torch.randn(2, 77, 768)
_UpperCamelCase = (sample, timestep, encoder_hidden_status)
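# Newer ipex releases accept sample_input for shape-aware optimization;
# fall back to the shape-agnostic call if this version does not support it.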
try:
_UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCamelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCamelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_UpperCamelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_UpperCamelCase = 666
_UpperCamelCase = torch.Generator(device).manual_seed(seed)
_UpperCamelCase = {"generator": generator}
if args.steps is not None:
_UpperCamelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_UpperCamelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 706 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
UpperCAmelCase = logging.WARNING
if model_args.verbose_logging:
UpperCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
UpperCAmelCase = logging.INFO
logger.setLevel(_snake_case )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
SCREAMING_SNAKE_CASE = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
SCREAMING_SNAKE_CASE = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self ,A ):
# reformat list to dict and set to pytorch format
UpperCAmelCase = self.feature_extractor.pad(
A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
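# The convolutional feature encoder downsamples the waveform, so mask
# lengths are computed at feature-frame resolution, not raw-sample resolution.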
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
UpperCAmelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
UpperCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
# these two operations make sure that all values
# before the output-length indices are attended to
UpperCAmelCase = 1
UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
UpperCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A ,min_masks=2 ,)
return batch
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ):
super().__init__(*A ,**A )
UpperCAmelCase = 0
UpperCAmelCase = max_gumbel_temp
UpperCAmelCase = min_gumbel_temp
UpperCAmelCase = gumbel_temp_decay
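# Temperature schedule parameters: temp_t = max(max_gumbel_temp * gumbel_temp_decay**t, min_gumbel_temp).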
def _UpperCamelCase ( self ,A ,A ):
model.train()
UpperCAmelCase = self._prepare_inputs(A )
if self.use_amp:
with autocast():
UpperCAmelCase = self.compute_loss(A ,A )
else:
UpperCAmelCase = self.compute_loss(A ,A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_snake_case , _snake_case )
# Downloading and loading a dataset from the hub.
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case )
def prepare_dataset(_snake_case ):
# check that all files have the correct sampling rate
UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
UpperCAmelCase = datasets.map(
_snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
UpperCAmelCase = vectorized_datasets.filter(
lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_snake_case ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
UpperCAmelCase = vectorized_datasets.map(
_snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
UpperCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
UpperCAmelCase = WavaVecaForPreTraining(_snake_case )
UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case )
UpperCAmelCase = WavaVecaPreTrainer(
model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 707 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = 30
UpperCAmelCase = self.seq_length + self.mem_len
UpperCAmelCase = 15
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = [10, 50, 80]
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = 8
UpperCAmelCase = 128
UpperCAmelCase = 2
UpperCAmelCase = 2
UpperCAmelCase = None
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 3
UpperCAmelCase = self.vocab_size - 1
UpperCAmelCase = 0.01
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLLMHeadModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLForSequenceClassification(A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCAmelCase = model.get_output_embeddings()
assert isinstance(A ,tf.keras.layers.Layer )
UpperCAmelCase = model.get_bias()
assert name is None
else:
UpperCAmelCase = model.get_output_embeddings()
assert x is None
UpperCAmelCase = model.get_bias()
assert name is None
def _UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _UpperCamelCase ( self ):
pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 74 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__ ( __A ):
def __init__( self ,A ,A=768 ):
super().__init__(UpperCamelCase__ )
UpperCAmelCase = proj_size
UpperCAmelCase = CLIPVisionModel(UpperCamelCase__ )
UpperCAmelCase = PaintByExampleMapper(UpperCamelCase__ )
UpperCAmelCase = nn.LayerNorm(config.hidden_size )
UpperCAmelCase = nn.Linear(config.hidden_size ,self.proj_size )
# learned unconditional embedding, used for classifier-free guidance scaling
UpperCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _UpperCamelCase ( self ,A ,A=False ):
UpperCAmelCase = self.model(pixel_values=UpperCamelCase__ )
UpperCAmelCase = clip_output.pooler_output
UpperCAmelCase = self.mapper(latent_states[:, None] )
UpperCAmelCase = self.final_layer_norm(UpperCamelCase__ )
UpperCAmelCase = self.proj_out(UpperCamelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCamelCase__ ( nn.Module ):
def __init__( self ,A ):
super().__init__()
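# Heuristic depth: roughly one mapper block per five CLIP encoder layers.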
UpperCAmelCase = (config.num_hidden_layers + 1) // 5
UpperCAmelCase = config.hidden_size
UpperCAmelCase = 1
UpperCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,activation_fn="""gelu""" ,attention_bias=UpperCamelCase__ )
for _ in range(UpperCamelCase__ )
] )
def _UpperCamelCase ( self ,A ):
for block in self.blocks:
UpperCAmelCase = block(UpperCamelCase__ )
return hidden_states
| 708 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
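# Shortest surface path on an a x b x c cuboid (a <= b <= c) has length
# sqrt((a + b)**2 + c**2); iterate over c (max_cuboid_size) and s = a + b,
# counting the valid (a, b) splits of s in closed form.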
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
_UpperCamelCase = logging.getLogger(__name__)
_UpperCamelCase = {"""facebook/bart-base""": BartForConditionalGeneration}
_UpperCamelCase = {"""facebook/bart-base""": BartTokenizer}
def _a ( ):
"""simple docstring"""
UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_snake_case , default=_snake_case , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_snake_case , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_snake_case , default=_snake_case , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_snake_case , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_snake_case , )
parser.add_argument(
"""--config_name""" , type=_snake_case , default=_snake_case , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_snake_case , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_snake_case , default=_snake_case , help="""Where to store the final ONNX file.""" )
UpperCAmelCase = parser.parse_args()
return args
def _a ( _snake_case , _snake_case="cpu" ):
"""simple docstring"""
UpperCAmelCase = model_dict[model_name].from_pretrained(_snake_case ).to(_snake_case )
UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_snake_case )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = 0
return huggingface_model, tokenizer
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
model.eval()
UpperCAmelCase = None
UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_snake_case ) )
with torch.no_grad():
UpperCAmelCase = """My friends are cool but they eat too many carbs."""
UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_snake_case , max_length=_snake_case , early_stopping=_snake_case , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_snake_case , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _snake_case , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_snake_case , )
logger.info("""Model exported to {}""".format(_snake_case ) )
UpperCAmelCase = remove_dup_initializers(os.path.abspath(_snake_case ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_snake_case ) )
UpperCAmelCase = onnxruntime.InferenceSession(_snake_case )
UpperCAmelCase = ort_sess.run(
_snake_case , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_snake_case ),
"""max_length""": np.array(_snake_case ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _a ( ):
"""simple docstring"""
UpperCAmelCase = parse_args()
UpperCAmelCase = 5
UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase = torch.device(args.device )
UpperCAmelCase , UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _snake_case )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_snake_case )
if args.max_length:
UpperCAmelCase = args.max_length
if args.num_beams:
UpperCAmelCase = args.num_beams
if args.output_file_path:
UpperCAmelCase = args.output_file_path
else:
UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
main()
| 709 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
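# Fused qkv weights are stacked as [query; key; value] along dim 0,
# so each projection is one third of the rows.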
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 0 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_UpperCamelCase = """bert-base-cased"""
_UpperCamelCase = """fp16"""
_UpperCamelCase = """bf16"""
_UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCamelCase__ ( lowercase_ ):
def _UpperCamelCase ( self ):
super().setUp()
UpperCAmelCase = dict(
ACCELERATE_USE_FSDP="""true""" ,MASTER_ADDR="""localhost""" ,MASTER_PORT="""10999""" ,RANK="""0""" ,LOCAL_RANK="""0""" ,WORLD_SIZE="""1""" ,)
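# Single-process mock distributed environment so FSDP plugin logic can be
# exercised without launching real workers.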
def _UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCamelCase__ ):
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = F'''{i + 1}'''
UpperCAmelCase = strategy
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def _UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCamelCase__ ):
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = prefetch_policy
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def _UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCamelCase__ ):
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = state_dict_type
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _UpperCamelCase ( self ):
UpperCAmelCase = AutoModel.from_pretrained(UpperCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCAmelCase = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
UpperCAmelCase = """2000"""
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = """TRANSFORMER_BASED_WRAP"""
UpperCAmelCase = """T5Layer"""
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = """SIZE_BASED_WRAP"""
UpperCAmelCase = """0"""
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = mp_dtype
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = Accelerator()
if mp_dtype == "fp16":
UpperCAmelCase = torch.floataa
elif mp_dtype == "bf16":
UpperCAmelCase = torch.bfloataa
UpperCAmelCase = MixedPrecision(param_dtype=UpperCamelCase__ ,reduce_dtype=UpperCamelCase__ ,buffer_dtype=UpperCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,UpperCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler ,UpperCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase__ )
def _UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCAmelCase = self.dist_env.copy()
UpperCAmelCase = str(UpperCamelCase__ ).lower()
with mockenv_context(**UpperCamelCase__ ):
UpperCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=UpperCamelCase__ ) )
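# Note (an assumption matching the env vars exercised above, not a statement from the
# source): `accelerate launch --use_fsdp` exports these same FSDP_* variables, so e.g.
# FSDP_SHARDING_STRATEGY=1 yields ShardingStrategy.FULL_SHARD on the resulting plugin.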
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest( TempDirTestCase ):
    def setUp( self ):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        self.peak_memory_usage_upper_bound = {
            """multi_gpu_fp16""": 3_200,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2_000,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 1_900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
    def test_performance( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder ,"""test_performance.py""" )
        cmd = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config ,env=os.environ.copy() )
    def test_checkpointing( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder ,"""test_checkpointing.py""" )
        cmd = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
            """--use_fsdp""",
            """--mixed_precision=fp16""",
            """--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config ,env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir ,"""epoch_0""" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config ,env=os.environ.copy() )
    def test_peak_memory_usage( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder ,"""test_peak_memory_usage.py""" )
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config ,env=os.environ.copy() )
| 710 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption( parser ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModel.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModel.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,BertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
| 711 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self ,degree ,coefficients ):
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self ,polynomial_a ):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree ,coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree ,coefficients )
    def __sub__( self ,polynomial_a ):
        return self + polynomial_a * Polynomial(0 ,[-1] )
    def __neg__( self ):
        return Polynomial(self.degree ,[-c for c in self.coefficients] )
    def __mul__( self ,polynomial_a ):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree ,coefficients )
    def evaluate( self ,substitution ):
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ):
        polynomial = """"""
        for i in range(self.degree ,-1 ,-1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ):
        return self.__str__()
    def derivative( self ):
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 ,coefficients )
    def integral( self ,constant = 0 ):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 ,coefficients )
    def __eq__( self ,polynomial_a ):
        if not isinstance(polynomial_a ,Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self ,polynomial_a ):
        return not self.__eq__(polynomial_a )
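# Illustrative usage (a sketch, not part of the original file); coefficients are
# ordered from the constant term upward.
if __name__ == "__main__":
    p = Polynomial(2 ,[1, 2, 3] )  # represents 3x^2 + 2x + 1
    print(p )  # 3x^2 + 2x + 1
    print(p.evaluate(2 ) )  # 1 + 2*2 + 3*4 = 17
    print(p.derivative() )  # 6x + 2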
| 74 | 0 |
"""simple docstring"""
from itertools import count
def solution( min_block_length = 50 ):
    """
    Project Euler 115: return the least n for which the fill-count function,
    using blocks of at least ``min_block_length`` units, first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
    def __init__( self ,parent ,):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] ,2 ,dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size ,
            n_special=self.n_special ,
            emb_dim=self.hidden_size ,
            n_layers=self.num_hidden_layers ,
            n_heads=self.num_attention_heads ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            gelu_activation=self.gelu_activation ,
            sinusoidal_embeddings=self.sinusoidal_embeddings ,
            asm=self.asm ,
            causal=self.causal ,
            n_langs=self.n_langs ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            summary_type=self.summary_type ,
            use_proj=self.use_proj ,
            bos_token_id=self.bos_token_id ,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertModel(config=config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids ,1 ) ,(1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask ,1 ) ,(1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids ,1 ) ,(1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=FlaubertConfig ,emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.int32 ,)  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] ,dtype=tf.float32 ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blenderbot_small"""] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blenderbot_small"""] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_blenderbot_small"""] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
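# With the _LazyModule indirection above, the heavy framework imports are deferred until
# first attribute access, e.g. (standard usage shown as an illustration):
#   from transformers.models.blenderbot_small import BlenderbotSmallTokenizer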
| 713 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def rename_keys( state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_UpperCamelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
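# Example invocation (paths are placeholders, not from the source):
# python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti --push_to_hub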
| 74 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCamelCase = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_UpperCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_UpperCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ,id="""token""" ) ,id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" ,id="""token""" ) ,id="""sequence""" ) ,id="""references""" ),
} ) ,)
    def _compute( self ,predictions ,references ,min_len = 1 ,max_len = 4 ,):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references ,hypotheses=predictions ,min_len=min_len ,max_len=max_len )
}
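# A minimal sketch (not part of the metric class above) of sentence-level GLEU exactly as
# the description explains it: count matching n-grams of order min_len..max_len, then
# take min(recall, precision). The helper name `_gleu_sketch` is illustrative only.
from collections import Counter
def _gleu_sketch(hypothesis ,reference ,min_len=1 ,max_len=4 ):
    def ngram_counts(tokens ):
        # multiset of all n-grams of orders min_len..max_len
        return Counter(
            tuple(tokens[i : i + n] )
            for n in range(min_len ,max_len + 1 )
            for i in range(len(tokens ) - n + 1 )
        )
    hyp_counts = ngram_counts(hypothesis )
    ref_counts = ngram_counts(reference )
    overlap = sum((hyp_counts & ref_counts).values() )  # matching n-grams
    precision = overlap / max(sum(hyp_counts.values() ) ,1 )
    recall = overlap / max(sum(ref_counts.values() ) ,1 )
    return min(precision ,recall )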
| 714 |
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    """simple docstring"""
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
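# For the adjacency list above, the articulation points printed are 2, 3 and 5.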
| 74 | 0 |
"""simple docstring"""
def merge_sort( collection ):
    """
    Repeatedly pulls the current minimum and maximum out of the list
    (a min-max selection sort, despite the ``merge_sort`` name used below).
    """
    start , end = [], []
    while len(collection ) > 1:
        min_one , max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
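    # e.g. entering "7,3,1,9" prints: 1,3,7,9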
| 715 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 0 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 716 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256 :
    def __init__( self ,data ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing( data ):
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" ,(len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ):
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
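# Sanity note for ror (illustrative, not in the original file):
# on 32-bit words, ror(0x00000001, 1) == 0x80000000.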
class SHA256HashTest( unittest.TestCase ):
    def test_match_hashes( self ):
        import hashlib
        msg = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHA256(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
    main()
| 74 | 0 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"generator": 2,
},
}
class lowerCamelCase__ :
def __init__( self ,A = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
UpperCAmelCase = primes[group]['''prime''']
UpperCAmelCase = primes[group]['''generator''']
UpperCAmelCase = int(hexlify(urandom(32 ) ) ,base=16 )
def _UpperCamelCase ( self ):
return hex(self.__private_key )[2:]
def _UpperCamelCase ( self ):
UpperCAmelCase = pow(self.generator ,self.__private_key ,self.prime )
return hex(lowerCamelCase__ )[2:]
def _UpperCamelCase ( self ,A ):
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase__ ,(self.prime - 1) // 2 ,self.prime ) == 1
)
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = int(lowerCamelCase__ ,base=16 )
if not self.is_valid_public_key(lowerCamelCase__ ):
raise ValueError("""Invalid public key""" )
UpperCAmelCase = pow(lowerCamelCase__ ,self.__private_key ,self.prime )
return shaaaa(str(lowerCamelCase__ ).encode() ).hexdigest()
@staticmethod
def _UpperCamelCase ( A ,A ):
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase__ ,(prime - 1) // 2 ,lowerCamelCase__ ) == 1
)
@staticmethod
def _UpperCamelCase ( A ,A ,A = 14 ):
UpperCAmelCase = int(lowerCamelCase__ ,base=16 )
UpperCAmelCase = int(lowerCamelCase__ ,base=16 )
UpperCAmelCase = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase__ ,lowerCamelCase__ ):
raise ValueError("""Invalid public key""" )
UpperCAmelCase = pow(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
return shaaaa(str(lowerCamelCase__ ).encode() ).hexdigest()
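# Hedged key-exchange sketch. Assumptions: the class is named DiffieHellman
# (the static call above already uses that name) and its de-mangled instance
# methods are generate_public_key / generate_shared_key, the usual names for
# this implementation; the mangled signatures above guarantee neither.
#
#   alice = DiffieHellman(14)
#   bob = DiffieHellman(14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b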
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
def _a ( _snake_case = 10 , _snake_case = 22 ):
"""simple docstring"""
UpperCAmelCase = range(1 , _snake_case )
UpperCAmelCase = range(1 , _snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
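# Worked example (hedged): 7**5 == 16807 has exactly 5 digits, so the pair
# (base=7, power=5) is one of the hits the comprehension above counts.
assert len(str(7**5)) == 5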
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A )
self.assertEqual(A ,["""c"""] )
self.assertEqual(A ,[2] )
# Out indices set to match out features
UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features set to match out indices
UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features selected from negative indices
UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A )
# Out features must be a list
with self.assertRaises(A ):
verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(A ):
verify_out_features_out_indices(A ,0 ,["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(A ,(0, 1) ,["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(A ):
verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ['''a''', '''b''', '''c''']
UpperCAmelCase = ['''a''', '''c''']
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ['''a''', '''b''']
self.assertEqual(backbone.out_features ,["""a""", """b"""] )
self.assertEqual(backbone.out_indices ,[0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
| 718 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
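# Quick illustration: duplicates collapse in the set, so the lengths differ.
assert _a([1, 2, 3]) and not _a([1, 2, 2])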
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = psutil.Process()
UpperCAmelCase = False
def _UpperCamelCase ( self ):
UpperCAmelCase = -1
while True:
UpperCAmelCase = max(self.process.memory_info().rss ,self.cpu_memory_peak )
            # intentionally a busy-wait: sleeping here could miss the true peak
if not self.peak_monitoring:
break
def _UpperCamelCase ( self ):
UpperCAmelCase = True
UpperCAmelCase = threading.Thread(target=self.peak_monitor )
UpperCAmelCase = True
self.thread.start()
def _UpperCamelCase ( self ):
UpperCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
_UpperCamelCase = PeakCPUMemory()
def _a ( ):
"""simple docstring"""
UpperCAmelCase = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = torch.cuda.memory_allocated(UpperCamelCase__ )
torch.cuda.reset_peak_memory_stats()
return measures
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
UpperCAmelCase = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = (torch.cuda.memory_allocated(UpperCamelCase__ ) - start_measures[str(UpperCamelCase__ )]) / 2**20
UpperCAmelCase = (torch.cuda.max_memory_allocated(UpperCamelCase__ ) - start_measures[str(UpperCamelCase__ )]) / 2**20
return measures
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
print(F'''{description}:''' )
print(F'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(F'''- GPU {i} allocated: {measures[str(UpperCamelCase__ )]:.2f}MiB''' )
UpperCAmelCase = measures[F'''{i}-peak''']
print(F'''- GPU {i} peak: {peak:.2f}MiB''' )
print(F'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(F'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
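# Hedged usage sketch. All three helpers above are mangled to the same name
# (_a), so the names below (start_measure, end_measure, log_measures) are the
# assumed originals from the accelerate benchmarks and are illustrative only:
#
#   start = start_measure()
#   run_workload()                      # hypothetical workload
#   log_measures(end_measure(start), "workload")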
| 719 |
"""simple docstring"""
import math
def _a ( _snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( _snake_case = 0.1 ):
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
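# Worked example (hedged): for side length 3 the diagonal holds 3, 5, 7, 9
# (three primes); the next layer's non-square corners come from
# range(3*3 + 3 + 1, 5*5, 4), i.e. 13, 17 and 21, matching the loop above.
assert list(range(3 * 3 + 3 + 1, (3 + 2) * (3 + 2), 3 + 1)) == [13, 17, 21]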
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=4 ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = True
UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCamelCase ( self ):
UpperCAmelCase = FlaxBertModelTester(self )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
| 720 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE = '''CLIPImageProcessor'''
SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self ,A=None ,A=None ,**A ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,A ,)
UpperCAmelCase = kwargs.pop("""feature_extractor""" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A ,A )
def __call__( self ,A=None ,A=None ,A=None ,**A ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A )
if images is not None:
UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.batch_decode(*A ,**A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.decode(*A ,**A )
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,)
return self.image_processor_class
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,)
return self.image_processor
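# Hedged usage sketch, assuming the class above is CLIPProcessor (its
# image-processor class attribute points at CLIPImageProcessor, so a standard
# CLIP checkpoint should pair with it); from_pretrained is inherited from
# ProcessorMixin.
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(
#       text=["a photo of a cat"], images=Image.open("cat.png"),
#       return_tensors="pt", padding=True,
#   )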
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
if nth_term == "":
return [""]
UpperCAmelCase = int(_snake_case )
UpperCAmelCase = int(_snake_case )
UpperCAmelCase = []
for temp in range(int(_snake_case ) ):
series.append(F'''1 / {pow(temp + 1 , int(_snake_case ) )}''' if series else """1""" )
return series
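# Worked example (hedged, using the p_series name from the __main__ block
# below): p_series(5, 2) yields the first five terms of the p = 2 series as
# strings: ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].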
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = int(input("""Enter the last number (nth term) of the P-Series"""))
_UpperCamelCase = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 721 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _a ( _snake_case = "mumbai" ):
"""simple docstring"""
UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
    # Each matching div contains the specifics listed for one job posting
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
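# Hedged usage sketch: fetch_jobs (the name the __main__ block below uses) is
# a generator, so cap it with islice instead of draining every listing:
#
#   from itertools import islice
#   for title, company in islice(fetch_jobs("Delhi"), 5):
#       print(title, "@", company)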
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 74 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = '''train'''
SCREAMING_SNAKE_CASE = '''dev'''
SCREAMING_SNAKE_CASE = '''test'''
class lowerCamelCase__ :
@staticmethod
def _UpperCamelCase ( A ,A ):
raise NotImplementedError
@staticmethod
def _UpperCamelCase ( A ):
raise NotImplementedError
@staticmethod
def _UpperCamelCase ( A ,A ,A ,A ,A=False ,A="[CLS]" ,A=1 ,A="[SEP]" ,A=False ,A=False ,A=0 ,A=0 ,A=-100 ,A=0 ,A=True ,):
UpperCAmelCase = {label: i for i, label in enumerate(A )}
UpperCAmelCase = []
for ex_index, example in enumerate(A ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" ,A ,len(A ) )
UpperCAmelCase = []
UpperCAmelCase = []
for word, label in zip(example.words ,example.labels ):
UpperCAmelCase = tokenizer.tokenize(A )
            # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
if len(A ) > 0:
tokens.extend(A )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
UpperCAmelCase = tokenizer.num_special_tokens_to_add()
if len(A ) > max_seq_length - special_tokens_count:
UpperCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
UpperCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
UpperCAmelCase = [sequence_a_segment_id] * len(A )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
UpperCAmelCase = [cls_token] + tokens
UpperCAmelCase = [pad_token_label_id] + label_ids
UpperCAmelCase = [cls_token_segment_id] + segment_ids
UpperCAmelCase = tokenizer.convert_tokens_to_ids(A )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
UpperCAmelCase = [1 if mask_padding_with_zero else 0] * len(A )
# Zero-pad up to the sequence length.
UpperCAmelCase = max_seq_length - len(A )
if pad_on_left:
UpperCAmelCase = ([pad_token] * padding_length) + input_ids
UpperCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
UpperCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
UpperCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" ,example.guid )
logger.info("""tokens: %s""" ,""" """.join([str(A ) for x in tokens] ) )
logger.info("""input_ids: %s""" ,""" """.join([str(A ) for x in input_ids] ) )
logger.info("""input_mask: %s""" ,""" """.join([str(A ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" ,""" """.join([str(A ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" ,""" """.join([str(A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase = None
features.append(
InputFeatures(
input_ids=A ,attention_mask=A ,token_type_ids=A ,label_ids=A ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss().ignore_index
def __init__( self ,A ,A ,A ,A ,A ,A = None ,A=False ,A = Split.train ,):
# Load data features from cache or dataset file
UpperCAmelCase = os.path.join(
A ,"""cached_{}_{}_{}""".format(mode.value ,tokenizer.__class__.__name__ ,str(A ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase = cached_features_file + """.lock"""
with FileLock(A ):
if os.path.exists(A ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
UpperCAmelCase = torch.load(A )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
UpperCAmelCase = token_classification_task.read_examples_from_file(A ,A )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCAmelCase = token_classification_task.convert_examples_to_features(
A ,A ,A ,A ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features ,A )
def __len__( self ):
return len(self.features )
def __getitem__( self ,A ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = -100
def __init__( self ,A ,A ,A ,A ,A ,A = None ,A=False ,A = Split.train ,):
UpperCAmelCase = token_classification_task.read_examples_from_file(A ,A )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCAmelCase = token_classification_task.convert_examples_to_features(
A ,A ,A ,A ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase = tf.data.Dataset.from_generator(
A ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) ,(
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) ,)
else:
UpperCAmelCase = tf.data.Dataset.from_generator(
A ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) ,(
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) ,)
def _UpperCamelCase ( self ):
UpperCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,A ):
return self.features[i]
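# Hedged construction sketch for the torch dataset above. Assumptions: the
# de-mangled class name is TokenClassificationDataset and the positional
# parameters follow the calls inside __init__ (task, data_dir, tokenizer,
# labels, model_type, max_seq_length); the mangled signature guarantees none
# of this.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   dataset = TokenClassificationDataset(
#       ner_task, "./data", tokenizer, ["O", "B-PER", "I-PER"], "bert", 128
#   )
#   features = dataset[0]  # one InputFeatures record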
| 700 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A )
self.assertEqual(A ,["""c"""] )
self.assertEqual(A ,[2] )
# Out indices set to match out features
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features set to match out indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[0, 2] )
# Out features selected from negative indices
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A )
self.assertEqual(A ,["""a""", """c"""] )
self.assertEqual(A ,[-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A )
# Out features must be a list
with self.assertRaises(A ):
verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(A ):
verify_out_features_out_indices(A ,0 ,["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(A ):
verify_out_features_out_indices(A ,(0, 1) ,["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(A ):
verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(A ):
verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ["""a""", """b""", """c"""]
UpperCAmelCase = ["""a""", """c"""]
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ["""a""", """b"""]
self.assertEqual(backbone.out_features ,["""a""", """b"""] )
self.assertEqual(backbone.out_indices ,[0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features ,["""a""", """c"""] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
| 74 | 0 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ):
if vertex not in self.adjacency:
UpperCAmelCase = {}
self.num_vertices += 1
def _UpperCamelCase ( self ,A ,A ,A ):
self.add_vertex(A )
self.add_vertex(A )
if head == tail:
return
UpperCAmelCase = weight
UpperCAmelCase = weight
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(A ) ):
UpperCAmelCase = list(edges[i] )
edges.sort(key=lambda A : e[2] )
for i in range(len(A ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
UpperCAmelCase = edges[i][2] + 1
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = weight
UpperCAmelCase = weight
def __str__( self ):
UpperCAmelCase = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCAmelCase = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _UpperCamelCase ( self ):
UpperCAmelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _UpperCamelCase ( self ):
return self.adjacency.keys()
@staticmethod
def _UpperCamelCase ( A=None ,A=None ):
UpperCAmelCase = Graph()
if vertices is None:
UpperCAmelCase = []
if edges is None:
UpperCAmelCase = []
for vertex in vertices:
g.add_vertex(A )
for edge in edges:
g.add_edge(*A )
return g
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = {}
UpperCAmelCase = {}
def __len__( self ):
return len(self.parent )
def _UpperCamelCase ( self ,A ):
if item in self.parent:
return self.find(A )
UpperCAmelCase = item
UpperCAmelCase = 0
return item
def _UpperCamelCase ( self ,A ):
if item not in self.parent:
return self.make_set(A )
if item != self.parent[item]:
UpperCAmelCase = self.find(self.parent[item] )
return self.parent[item]
def _UpperCamelCase ( self ,A ,A ):
UpperCAmelCase = self.find(A )
UpperCAmelCase = self.find(A )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCAmelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCAmelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCAmelCase = roota
return roota
return None
@staticmethod
def _UpperCamelCase ( A ):
UpperCAmelCase = graph.num_vertices
UpperCAmelCase = Graph.UnionFind()
UpperCAmelCase = []
while num_components > 1:
UpperCAmelCase = {}
for vertex in graph.get_vertices():
UpperCAmelCase = -1
UpperCAmelCase = graph.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = union_find.find(A )
UpperCAmelCase = union_find.find(A )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = cheap_edge[vertex]
if union_find.find(A ) != union_find.find(A ):
union_find.union(A ,A )
mst_edges.append(cheap_edge[vertex] )
UpperCAmelCase = num_components - 1
UpperCAmelCase = Graph.build(edges=A )
return mst
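# Hedged usage sketch. Graph.build(edges=...) is the spelling the class uses
# internally; the static MST entry point is mangled, so boruvka_mst below is
# an assumed de-mangled name, not one the code above guarantees:
#
#   g = Graph.build(edges=[(1, 2, 5), (2, 3, 4), (1, 3, 7)])
#   mst = Graph.boruvka_mst(g)   # keeps the two cheapest edges: 2-3 and 1-2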
| 701 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
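# Hedged usage sketch. The de-mangled names below (CircularQueueLinkedList,
# enqueue, dequeue) follow the usual naming for this structure; the mangled
# method names above do not guarantee them.
#
#   q = CircularQueueLinkedList(2)
#   q.enqueue("a")
#   q.enqueue("b")
#   assert q.dequeue() == "a"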
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_UpperCamelCase = [8, 5, 9, 7]
_UpperCamelCase = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_UpperCamelCase = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowerCamelCase__ :
def __init__( self ,A ,A ,A ,):
UpperCAmelCase = claim_vector
UpperCAmelCase = allocated_resources_table
UpperCAmelCase = maximum_claim_table
def _UpperCamelCase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _UpperCamelCase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _UpperCamelCase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(A ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _UpperCamelCase ( self ):
return {self.__need().index(A ): i for i in self.__need()}
def _UpperCamelCase ( self ,**A ):
UpperCAmelCase = self.__need()
UpperCAmelCase = self.__allocated_resources_table
UpperCAmelCase = self.__available_resources()
UpperCAmelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
UpperCAmelCase = False
for each_need in need_list:
UpperCAmelCase = True
for index, need in enumerate(A ):
if need > available_resources[index]:
UpperCAmelCase = False
break
if execution:
UpperCAmelCase = True
                    # recover the process's original index from the need-index manager
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCAmelCase = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(A )
# update available/freed resources stack
UpperCAmelCase = np.array(A ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(A ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _UpperCamelCase ( self ):
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(A ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(A ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(A ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(A ) for x in self.__available_resources() ) )
time.sleep(1 )
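# Hedged run sketch. Assumptions: the de-mangled class name is
# BankersAlgorithm, the **kwargs method above is main, and the three module
# tables (all mangled to _UpperCamelCase) keep their original names:
#
#   BankersAlgorithm(claim_vector, allocated_resources_table,
#                    maximum_claim_table).main(describe=True)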
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
UpperCAmelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(_snake_case )
UpperCAmelCase = torch.load(_snake_case )
load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
UpperCAmelCase = np.load(_snake_case )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
model.save_pretrained(_snake_case )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
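# Example invocation (hedged; the script name and file paths are placeholders,
# the flags match the argparse definitions above):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan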
| 74 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=True ):
"""simple docstring"""
model.train()
UpperCAmelCase = model(_snake_case )
UpperCAmelCase = F.mse_loss(_snake_case , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_snake_case )
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
set_seed(42 )
UpperCAmelCase = RegressionModel()
UpperCAmelCase = deepcopy(_snake_case )
UpperCAmelCase = RegressionDataset(length=80 )
UpperCAmelCase = DataLoader(_snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase = LambdaLR(_snake_case , lr_lambda=lambda _snake_case : epoch**0.65 )
UpperCAmelCase = LambdaLR(_snake_case , lr_lambda=lambda _snake_case : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(_snake_case , _snake_case , _snake_case , _snake_case )
else:
UpperCAmelCase , UpperCAmelCase = accelerator.prepare(_snake_case , _snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(_snake_case )
# Use a single batch
UpperCAmelCase , UpperCAmelCase = next(iter(_snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_snake_case ):
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
else:
# Sync grads
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_snake_case , _snake_case , _snake_case , _snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(_snake_case ) )]
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(_snake_case )
# Use a single batch
UpperCAmelCase , UpperCAmelCase = next(iter(_snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_snake_case ):
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
else:
# Sync grads
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(_snake_case ) )]
def _a ( _snake_case=False , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase = Accelerator(
split_batches=_snake_case , dispatch_batches=_snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(_snake_case )
for iteration, batch in enumerate(_snake_case ):
UpperCAmelCase , UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_snake_case ):
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_snake_case ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(_snake_case ) )]
GradientState._reset_state()
def _a ( _snake_case=False , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase = Accelerator(
split_batches=_snake_case , dispatch_batches=_snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(_snake_case , _snake_case )
for iteration, batch in enumerate(_snake_case ):
UpperCAmelCase , UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_snake_case )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_snake_case ):
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_snake_case ))
if accelerator.num_processes > 1:
check_model_parameters(_snake_case , _snake_case , _snake_case , _snake_case )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
"""simple docstring"""
UpperCAmelCase = Accelerator()
UpperCAmelCase = RegressionDataset(length=80 )
UpperCAmelCase = DataLoader(_snake_case , batch_size=16 )
UpperCAmelCase = RegressionDataset(length=96 )
UpperCAmelCase = DataLoader(_snake_case , batch_size=16 )
UpperCAmelCase , UpperCAmelCase = accelerator.prepare(_snake_case , _snake_case )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_snake_case )
if iteration < len(_snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_snake_case )
if batch_num < len(_snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
"""simple docstring"""
UpperCAmelCase = Accelerator()
UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(_snake_case )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(_snake_case )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_snake_case , _snake_case )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_snake_case , _snake_case )
def _a ( _snake_case ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 703 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_UpperCamelCase = concatenate_datasets
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadManager
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
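# A short, hypothetical usage sketch of the public API re-exported above
# (assumes network access to the Hugging Face Hub; illustrative only):
def _demo_public_api():
    from datasets import concatenate_datasets, load_dataset

    squad = load_dataset("squad", split="train[:100]")
    doubled = concatenate_datasets([squad, squad])
    assert len(doubled) == 2 * len(squad)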
| 74 | 0 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 704 |
"""simple docstring"""
def _a ( input_num ):
    """
    Return the sum of the proper divisors of a positive integer.

    >>> _a(12)
    16
    >>> _a(1)
    0
    """
    if not isinstance(input_num , int ):
        raise ValueError("""Input must be an integer""" )
    if input_num <= 0:
        raise ValueError("""Input must be positive""" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=snake_case )
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE = Features({'''image''': Image()} )
SCREAMING_SNAKE_CASE = Features({'''labels''': ClassLabel} )
SCREAMING_SNAKE_CASE = '''image'''
SCREAMING_SNAKE_CASE = '''labels'''
def _UpperCamelCase ( self ,A ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] ,A ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def _UpperCamelCase ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
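# Hypothetical usage sketch of the task template above. The obfuscated names
# correspond to `datasets.tasks.ImageClassification` and its
# `align_with_features` method; aligning copies the dataset's real ClassLabel
# into the template's label schema.
def _demo_align_with_features():
    from datasets import ClassLabel, Features, Image
    from datasets.tasks import ImageClassification

    task = ImageClassification()
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    aligned = task.align_with_features(features)
    assert aligned.label_schema["labels"].names == ["cat", "dog"]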
| 705 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
_UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _UpperCamelCase ( self ):
UpperCAmelCase = {}
if self.train_dir is not None:
UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase = self.validation_dir
UpperCAmelCase = data_files if data_files else None
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class lowerCamelCase__ :
def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ):
UpperCAmelCase = input_size
UpperCAmelCase = mask_patch_size
UpperCAmelCase = model_patch_size
UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
UpperCAmelCase = self.input_size // self.mask_patch_size
UpperCAmelCase = self.mask_patch_size // self.model_patch_size
UpperCAmelCase = self.rand_size**2
UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase = np.zeros(self.token_count ,dtype=int )
UpperCAmelCase = 1
UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
return torch.tensor(mask.flatten() )
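# A quick numeric check of the generator above (assuming the class is bound to
# the name `MaskGenerator`, as the call site further below uses): with
# input_size=192, mask_patch_size=32 and model_patch_size=4 there are
# (192 // 32) ** 2 = 36 maskable patches, ceil(36 * 0.6) = 22 of which are
# masked, and each is upsampled by (32 // 4) ** 2 = 64 model patches.
def _demo_mask_generator():
    generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = generator()
    assert mask.shape == ((192 // 4) ** 2,)
    assert mask.sum().item() == 22 * 64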
def _a ( examples ):
    """Stack per-example pixel values and boolean masks into a model-ready batch."""
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , _snake_case , _snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase = split["""train"""]
UpperCAmelCase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_snake_case , """decoder_type""" ):
UpperCAmelCase = """simmim"""
# adapt config
UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case )
if training_args.do_train:
UpperCAmelCase = ds["""train"""].column_names
else:
UpperCAmelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase = """image"""
elif "img" in column_names:
UpperCAmelCase = """img"""
else:
UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(examples ):
    examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
    examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
    return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , _snake_case )
trainer.save_metrics("""eval""" , _snake_case )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
if __name__ == "__main__":
main()
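# Example invocation (hypothetical paths and hyperparameters, shown only for
# illustration; the flags map onto the dataclasses defined above):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval \
#       --overwrite_output_dir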
| 74 | 0 |
"""simple docstring"""
from itertools import count
def _a ( min_block_length = 50 ):
    """Project Euler 115: smallest row length n whose fill-count exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 706 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
SCREAMING_SNAKE_CASE = field(
default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
UpperCAmelCase = logging.WARNING
if model_args.verbose_logging:
UpperCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
UpperCAmelCase = logging.INFO
logger.setLevel(_snake_case )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
SCREAMING_SNAKE_CASE = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
SCREAMING_SNAKE_CASE = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self ,A ):
# reformat list to dict and set to pytorch format
UpperCAmelCase = self.feature_extractor.pad(
A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
UpperCAmelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
UpperCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
UpperCAmelCase = 1
UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
UpperCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A ,min_masks=2 ,)
return batch
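# For reference, a hypothetical standalone call to the `_compute_mask_indices`
# helper imported above: it samples spans of `mask_length` timesteps so that
# roughly `mask_prob` of each sequence ends up masked.
def _demo_mask_indices():
    mask_indices = _compute_mask_indices((2, 100), mask_prob=0.2, mask_length=10, min_masks=2)
    assert mask_indices.shape == (2, 100)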
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ):
super().__init__(*A ,**A )
UpperCAmelCase = 0
UpperCAmelCase = max_gumbel_temp
UpperCAmelCase = min_gumbel_temp
UpperCAmelCase = gumbel_temp_decay
def _UpperCamelCase ( self ,A ,A ):
model.train()
UpperCAmelCase = self._prepare_inputs(A )
if self.use_amp:
with autocast():
UpperCAmelCase = self.compute_loss(A ,A )
else:
UpperCAmelCase = self.compute_loss(A ,A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
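# The gumbel temperature used above follows a simple exponential decay with a
# floor; a standalone sketch (values mirror the defaults declared earlier):
def _gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)

# e.g. _gumbel_temperature(0) == 2.0, while after roughly 277k update steps
# 2.0 * 0.999995**step decays to the 0.5 floor.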
def _a ( ):
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_snake_case , _snake_case )
# Downloading and loading a dataset from the hub.
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
UpperCAmelCase = datasets.map(
_snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
UpperCAmelCase = vectorized_datasets.filter(
lambda data : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(batch ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
UpperCAmelCase = vectorized_datasets.map(
_snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
UpperCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
UpperCAmelCase = WavaVecaForPreTraining(_snake_case )
UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case )
UpperCAmelCase = WavaVecaPreTrainer(
model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 74 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self ,A ,A=2 ,A=56 ,A=True ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=2 ,A=2 ,A=7 ,A="gelu_new" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=4 ,A="block_sparse" ,A=True ,A=False ,A=2 ,A=3 ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
UpperCAmelCase = rescale_embeddings
UpperCAmelCase = attention_type
UpperCAmelCase = use_bias
UpperCAmelCase = block_size
UpperCAmelCase = num_random_blocks
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = BigBirdConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,block_size=self.block_size ,num_random_blocks=self.num_random_blocks ,use_bias=self.use_bias ,rescale_embeddings=self.rescale_embeddings ,)
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ):
UpperCAmelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCamelCase ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCamelCase ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCamelCase ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCamelCase ( self ):
super().test_hidden_states_output()
@slow
def _UpperCamelCase ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(A )
def _UpperCamelCase ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(A ,A )
UpperCAmelCase = model_class(A )
@jax.jit
def model_jitted(A ,A=None ,**A ):
return model(input_ids=A ,attention_mask=A ,**A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase = model_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
def _UpperCamelCase ( self ,A ,A ,A ,A=1e-5 ,A="outputs" ,A=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(A ,A ,A ,A ,A ,A )
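# The JIT-enabled/JIT-disabled parity check above follows a standard JAX
# pattern; a minimal self-contained version (illustrative, unrelated to BigBird):
import jax
import jax.numpy as jnp

@jax.jit
def _scaled_sum(x):
    return (2.0 * x).sum()

def _demo_jit_parity():
    x = jnp.arange(4.0)
    jitted = _scaled_sum(x)
    with jax.disable_jit():
        eager = _scaled_sum(x)
    assert jnp.allclose(jitted, eager)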
| 707 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = 30
UpperCAmelCase = self.seq_length + self.mem_len
UpperCAmelCase = 15
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = [10, 50, 80]
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = 8
UpperCAmelCase = 128
UpperCAmelCase = 2
UpperCAmelCase = 2
UpperCAmelCase = None
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 3
UpperCAmelCase = self.vocab_size - 1
UpperCAmelCase = 0.01
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLLMHeadModel(A )
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def _UpperCamelCase ( self ,A ,A ,A ,A ):
UpperCAmelCase = TFTransfoXLForSequenceClassification(A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,pipeline_test_casse_name ,A ,A ,A ,A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def _UpperCamelCase ( self ):
self.model_tester.set_seed()
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def _UpperCamelCase ( self ):
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
    for model_class in self.all_model_classes:
        model = model_class(config )
        assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
        if model_class in list_other_models_with_output_ebd:
            x = model.get_output_embeddings()
            assert isinstance(x ,tf.keras.layers.Layer )
            name = model.get_bias()
            assert name is None
        else:
            x = model.get_output_embeddings()
            assert x is None
            name = model.get_bias()
            assert name is None
def _UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _UpperCamelCase ( self ):
pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 74 | 0 |
"""simple docstring"""
from math import factorial
def _a ( n , k ):
    """
    Return the binomial coefficient n choose k.

    >>> _a(52, 5)
    2598960
    """
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
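# An equivalent multiplicative form (a sketch; it avoids the large intermediate
# factorials and stays exact because each partial product is itself a binomial
# coefficient):
def _combinations_multiplicative(n, k):
    result = 1
    for i in range(1, k + 1):
        result = result * (n - k + i) // i
    return result

assert _combinations_multiplicative(52, 5) == 2_598_960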
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 708 |
"""simple docstring"""
from math import sqrt
def _a ( limit = 1_000_000 ):
    """Project Euler 86: least cuboid size M such that more than `limit` cuboids have an integer shortest path."""
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
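# A quick check of the split-counting expression used above: for a longest side
# M = 3 and a + b = 4, the valid splits with 1 <= a <= b <= M are (1, 3) and
# (2, 2), matching min(M, (a + b) // 2) - max(1, a + b - M) + 1 = 2.
def _count_splits(max_cuboid_size, sum_shortest_sides):
    return (
        min(max_cuboid_size, sum_shortest_sides // 2)
        - max(1, sum_shortest_sides - max_cuboid_size)
        + 1
    )

assert _count_splits(3, 4) == 2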
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = '''mgp-str'''
def __init__( self ,A=[32, 128] ,A=4 ,A=3 ,A=27 ,A=38 ,A=50_257 ,A=30_522 ,A=768 ,A=12 ,A=12 ,A=4.0 ,A=True ,A=False ,A=1e-5 ,A=0.0 ,A=0.0 ,A=0.0 ,A=False ,A=0.02 ,**A ,):
super().__init__(**A )
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = max_token_length
UpperCAmelCase = num_character_labels
UpperCAmelCase = num_bpe_labels
UpperCAmelCase = num_wordpiece_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = mlp_ratio
UpperCAmelCase = distilled
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_rate
UpperCAmelCase = qkv_bias
UpperCAmelCase = attn_drop_rate
UpperCAmelCase = drop_path_rate
UpperCAmelCase = output_aa_attentions
UpperCAmelCase = initializer_range
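# Hypothetical usage sketch: in the canonical transformers implementation this
# class is `MgpstrConfig`, and it behaves like any other PretrainedConfig.
def _demo_mgp_str_config():
    from transformers import MgpstrConfig

    config = MgpstrConfig()
    assert config.model_type == "mgp-str"
    assert config.image_size == [32, 128]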
| 709 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
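# The qkv handling above splits a fused projection into equal thirds along the
# first dimension; a minimal tensor sketch of the same slicing:
import torch

def _demo_qkv_split():
    mixed_qkv = torch.arange(6.0)  # stands in for a fused (3 * dim,) parameter
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert torch.equal(torch.cat([query, key, value]), mixed_qkv)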
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCAmelCase = 128
elif "12-12" in model_name:
UpperCAmelCase = 12
UpperCAmelCase = 12
elif "14-14" in model_name:
UpperCAmelCase = 14
UpperCAmelCase = 14
elif "16-16" in model_name:
UpperCAmelCase = 16
UpperCAmelCase = 16
else:
raise ValueError("""Model not supported""" )
UpperCAmelCase = """huggingface/label-files"""
if "speech-commands" in model_name:
UpperCAmelCase = 35
UpperCAmelCase = """speech-commands-v2-id2label.json"""
else:
UpperCAmelCase = 527
UpperCAmelCase = """audioset-id2label.json"""
UpperCAmelCase = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase = {int(_snake_case ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _a ( _snake_case ):
"""simple docstring"""
if "module.v" in name:
UpperCAmelCase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
UpperCAmelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
UpperCAmelCase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
UpperCAmelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
UpperCAmelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
UpperCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCAmelCase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
UpperCAmelCase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
UpperCAmelCase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
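# Example mapping (editorial sketch): an attention projection from the original
# checkpoint is rewritten into the HF module hierarchy by the replacements above,
# e.g. "module.v.blocks.0.attn.proj.weight"
#   -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"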
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(_snake_case )
if "qkv" in key:
UpperCAmelCase = key.split(""".""" )
UpperCAmelCase = int(key_split[3] )
UpperCAmelCase = config.hidden_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[dim : dim * 2, :]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
else:
UpperCAmelCase = val
return orig_state_dict
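# Shape sketch (editorial, assuming the ViT-base hidden size of 768): the original
# checkpoint stores query/key/value as one fused tensor of shape
# (3 * hidden_size, hidden_size); the slices above peel off the query rows, then
# the key rows, then the value rows.
# qkv_weight = torch.randn(3 * 768, 768)
# q, k, v = qkv_weight[:768, :], qkv_weight[768 : 2 * 768, :], qkv_weight[-768:, :]
# assert q.shape == k.shape == v.shape == torch.Size([768, 768])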
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(k , None )
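# Note (editorial): these are the original classification and distillation head
# weights; they are dropped with a None default (so missing keys are ignored)
# because the HF classifier head is filled from the module.mlp_head.* keys
# renamed above instead.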
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase = get_audio_spectrogram_transformer_config(_snake_case )
UpperCAmelCase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
UpperCAmelCase = model_name_to_url[model_name]
UpperCAmelCase = torch.hub.load_state_dict_from_url(_snake_case , map_location="""cpu""" )
# remove some keys
remove_keys(_snake_case )
# rename some keys
UpperCAmelCase = convert_state_dict(_snake_case , _snake_case )
# load 🤗 model
UpperCAmelCase = ASTForAudioClassification(_snake_case )
model.eval()
model.load_state_dict(_snake_case )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCAmelCase = -4.2677393 if """speech-commands""" not in model_name else -6.845978
UpperCAmelCase = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
UpperCAmelCase = 1024 if """speech-commands""" not in model_name else 128
UpperCAmelCase = ASTFeatureExtractor(mean=_snake_case , std=_snake_case , max_length=_snake_case )
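# Note (editorial): the mean/std pairs above are the dataset-level normalization
# statistics from the original AST repo (AudioSet vs. Speech Commands), and
# max_length is the number of mel frames the model expects (1024 vs. 128).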
if "speech-commands" in model_name:
UpperCAmelCase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
UpperCAmelCase = dataset[0]["""audio"""]["""array"""]
else:
UpperCAmelCase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
UpperCAmelCase , UpperCAmelCase = torchaudio.load(_snake_case )
UpperCAmelCase = waveform.squeeze().numpy()
UpperCAmelCase = feature_extractor(_snake_case , sampling_rate=16_000 , return_tensors="""pt""" )
# forward pass
UpperCAmelCase = model(**_snake_case )
UpperCAmelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _snake_case , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_snake_case ).mkdir(exist_ok=True )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(_snake_case )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
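# Example invocation (editorial sketch; the script file name and output path are
# hypothetical):
# python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#     --model_name ast-finetuned-audioset-10-10-0.4593 \
#     --pytorch_dump_folder_path ./ast-audioset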
| 710 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=make_reports )
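# Usage note (editorial assumption): under their original hook names,
# pytest_addoption and pytest_terminal_summary, pytest discovers these functions
# automatically from conftest.py; reports are then emitted when the suite is run
# with the shared --make-reports option, e.g.
# pytest --make-reports=tests_suite tests/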
| 74 | 0 |