import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
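# A minimal standalone sketch (illustrative numbers, not part of the file above) of the
# Metropolis-style acceptance rule used in simulated_annealing: a worsening move is
# accepted with probability e^(change / temperature), so it passes often while the
# temperature is high and rarely once the temperature has decayed. Relies on the
# `import math` at the top of the module.
change = -2.0  # hypothetical score decrease from a candidate neighbor
print(math.e ** (change / 100))  # ~0.98: almost always accepted at start_temperate=100
print(math.e ** (change / 1))    # ~0.14: rarely accepted near threshold_temp=1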
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
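# A quick sanity check (hand-worked reference value, not from the original file):
# x^3 + x^2 = x^2 * (x + 1) is negative on [-5, -1] and positive on [-1, 5], so the
# unsigned area is 1376/12 + 2376/12 = 3752/12 ~= 312.67 -- the value the printed
# approximations converge towards as the step count grows.
assert abs(trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000) - 3752 / 12) < 0.01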
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
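# A hedged companion sketch (not part of the original script): reading the shards back
# with tf.data. The feature names and int64 types mirror get_serialized_examples above;
# since sequence lengths are not stored in the schema here, the features are decoded
# as variable-length and densified.
def load_shards(file_pattern):
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }

    def decode(record_bytes):
        parsed = tf.io.parse_single_example(record_bytes, feature_spec)
        return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

    files = tf.data.Dataset.list_files(file_pattern)
    return tf.data.TFRecordDataset(files).map(decode)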
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
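# A minimal, self-contained illustration (hypothetical values, independent of the
# circuit above) of the basis-sifting step: only positions where Alice's and Bob's
# measurement bases agree contribute bits to the key, which is why the circuit
# oversamples with 6 * key_len qubits.
alice_demo_basis = [0, 1, 1, 0, 1]
bob_demo_basis = [0, 0, 1, 0, 0]
demo_result = "10110"
sifted = "".join(bit for a, b, bit in zip(alice_demo_basis, bob_demo_basis, demo_result) if a == b)
assert sifted == "111"  # positions 0, 2 and 3 agree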
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnXXIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456,
        771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
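# Reading the fairseq-parity assertions above: labels end with [..., 2 (EOS), EN_CODE],
# and shift_tokens_right rotates the trailing language code to position 0, so the
# decoder inputs start with the language code and end with EOS. A sketch of the wrap:
#   labels:            [tok, tok, ..., 2, EN_CODE]
#   decoder_input_ids: [EN_CODE, tok, tok, ..., 2]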
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
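# A quick shape-check sketch (hypothetical hyperparameters chosen for illustration;
# the real config values come from the pretrained checkpoint):
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.zeros((1, 2048), dtype=torch.long)
#   mask = torch.ones((1, 2048), dtype=torch.long)
#   out, _ = encoder(tokens, mask)  # out.shape == (1, 2048, 768)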
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
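# Example invocation (the script filename here is hypothetical; the flags match the
# argparse definitions above):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224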
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
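# A usage sketch (assuming the registrations above ran with numpy installed):
# get_formatter resolves aliases before instantiating, so the alias "np" and the
# canonical name "numpy" both yield a NumpyFormatter instance.
#   fmt = get_formatter("np")  # equivalent to get_formatter("numpy")
#   assert isinstance(fmt, NumpyFormatter)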
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
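# A small illustrative walk-through (not part of the tokenizer): inside bpe(), the word
# "hello" starts as the symbol tuple ("h", "e", "l", "l", "o</w>") and get_pairs yields
# every adjacent symbol pair, from which the lowest-ranked pair in merges.txt is merged
# repeatedly until no known merge remains.
#   get_pairs(("h", "e", "l", "l", "o</w>"))
#   == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}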
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 83 | MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = """Morse code here!"""
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
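# Quick round-trip check (using the MORSE_CODE_DICT defined in this snippet):
#   encrypt("SOS")          -> '... --- ...'
#   decrypt("... --- ...")  -> 'SOS'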
| 83 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = ['''image_processor''', '''tokenizer''']
lowerCamelCase :str = '''Pix2StructImageProcessor'''
lowerCamelCase :Tuple = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = False
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 20_48 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
_A = self.tokenizer
_A = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_A = self.image_processor(
lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_patches=lowerCAmelCase_ , **lowerCAmelCase_ )
else:
# add pixel_values and bbox
_A = self.image_processor(
lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_patches=lowerCAmelCase_ , header_text=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None and not self.image_processor.is_vqa:
_A = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if "attention_mask" in text_encoding:
_A = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
_A = text_encoding.pop("""input_ids""" )
else:
_A = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 83 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
from collections import defaultdict
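# Even Tree problem (HackerRank style): remove as many edges as possible from
# an undirected tree so that every remaining component has an even number of
# nodes. Each even-sized subtree found during the DFS marks one removable
# edge; the root's whole-tree entry is discounted by the final `- 1` below.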
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 83 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
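# In the Transformers Tool API these three methods play the encode -> forward
# -> decode roles: tokenize the input text, generate a summary with the
# seq2seq model, then decode the generated ids back into plain text.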
| 83 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_SCREAMING_SNAKE_CASE = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_SCREAMING_SNAKE_CASE = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="auto" , lowerCAmelCase_=-1 , lowerCAmelCase_=0.9 , lowerCAmelCase_=5 , lowerCAmelCase_=5_00 , lowerCAmelCase_="gpt2-large" , lowerCAmelCase_=-1 , lowerCAmelCase_=10_24 , lowerCAmelCase_=25 , lowerCAmelCase_=5 , lowerCAmelCase_=True , lowerCAmelCase_=25 , ) -> Optional[int]:
_A = compute_mauve(
p_text=lowerCAmelCase_ , q_text=lowerCAmelCase_ , p_features=lowerCAmelCase_ , q_features=lowerCAmelCase_ , p_tokens=lowerCAmelCase_ , q_tokens=lowerCAmelCase_ , num_buckets=lowerCAmelCase_ , pca_max_data=lowerCAmelCase_ , kmeans_explained_var=lowerCAmelCase_ , kmeans_num_redo=lowerCAmelCase_ , kmeans_max_iter=lowerCAmelCase_ , featurize_model_name=lowerCAmelCase_ , device_id=lowerCAmelCase_ , max_text_length=lowerCAmelCase_ , divergence_curve_discretization_size=lowerCAmelCase_ , mauve_scaling_factor=lowerCAmelCase_ , verbose=lowerCAmelCase_ , seed=lowerCAmelCase_ , )
return out
| 83 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
    sd = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_SCREAMING_SNAKE_CASE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
_A = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
_A = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def UpperCAmelCase ( self ) -> str:
_A = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_A = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_A = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
_A = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(lowerCAmelCase_ , """w""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , """r""" ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
_A = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , lowerCAmelCase_ , overwrite_result=re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
| 83 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 1 |
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + """, """.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + """, """.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
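# Example values, read straight off the METRIC_CONVERSION table above:
#   volume_conversion(4, "cubicmeter", "litre")   -> 4000.0
#   volume_conversion(1, "litre", "gallon")       -> ~0.264172  (0.001 * 264.172)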
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | def solution(limit: int = 1_000_000) -> int:
    # sieve of Eratosthenes over the odd numbers, with 2 added back in
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod_{p | n} (1 - 1/p)
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    print(F'''{solution() = }''')
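# Note: sum(phi[2:]) = phi(2) + ... + phi(limit); since each phi(b) counts the
# numerators coprime to b, this total is the number of reduced proper
# fractions a/b with denominators up to `limit`.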
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
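# A NOR gate outputs 1 only when both inputs are 0. NOR is functionally
# complete: for instance NOT x == nor_gate(x, x), so every other basic gate
# can be composed from NOR alone.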
def main() -> None:
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
    print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
    print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
    print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 83 | 1 |
from ..utils import DummyObject, requires_backends
class a ( metaclass=__lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 83 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
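# The two helpers above post-process detector boxes: the first rescales xyxy
# coordinates by the per-image (y, x) scale factors, the second clamps every
# coordinate into the valid image rectangle given by box_size = (height, width).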
| 83 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case ( ) -> List[str]:
_A = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw).convert("""RGB""")
return image
def snake_case ( snake_case__ :Dict) -> Tuple:
_A = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding"""))
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding"""))
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight"""))
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias"""))
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight"""))
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias"""))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias'''))
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight"""))
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias"""))
# fmt: on
return rename_keys
def snake_case ( snake_case__ :List[Any] , snake_case__ :Dict , snake_case__ :str) -> Any:
_A = dct.pop(snake_case__)
_A = val
def snake_case ( snake_case__ :str , snake_case__ :List[str]) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers):
# read in original q and v biases
_A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''')
_A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''')
# next, set bias in the state dict
_A = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__), v_bias))
_A = qkv_bias
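# The fused qkv bias is assembled as [q_bias, zeros, v_bias]: query and value
# projections keep their learned biases while the key bias is fixed at zero.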
def snake_case ( snake_case__ :List[Any]) -> Any:
_A = 364 if """coco""" in model_name else 224
_A = InstructBlipVisionConfig(image_size=snake_case__).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_A = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1).to_dict()
elif "t5-xxl" in model_name:
_A = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1).to_dict()
elif "vicuna-7b" in model_name:
_A = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32_001).to_dict()
elif "vicuna-13b" in model_name:
_A = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32_001).to_dict()
else:
raise ValueError("""Model name not supported""")
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_A = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
_A = InstructBlipConfig(vision_config=snake_case__ , text_config=snake_case__ , qformer_config=snake_case__)
return config, image_size
@torch.no_grad()
def snake_case ( snake_case__ :str , snake_case__ :Union[str, Any]=None , snake_case__ :Optional[Any]=False) -> List[str]:
_A = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""")
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""})
if "t5" in model_name:
_A = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""")
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_A = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""")
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""})
_A , _A = get_blipa_config(snake_case__)
_A = InstructBlipForConditionalGeneration(snake_case__).eval()
_A = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
_A , _A = model_name_to_original[model_name]
# load original model
print("""Loading original model...""")
_A = """cuda:1""" if torch.cuda.is_available() else """cpu"""
_A = """cuda:2""" if torch.cuda.is_available() else """cpu"""
_A , _A , _A = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__)
original_model.eval()
print("""Done!""")
# update state dict keys
_A = original_model.state_dict()
_A = create_rename_keys(snake_case__)
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_A = state_dict.pop(snake_case__)
if key.startswith("""Qformer.bert"""):
_A = key.replace("""Qformer.bert""" , """qformer""")
if "attention.self" in key:
_A = key.replace("""self""" , """attention""")
if "llm_proj" in key:
_A = key.replace("""llm_proj""" , """language_projection""")
if "t5_proj" in key:
_A = key.replace("""t5_proj""" , """language_projection""")
if key.startswith("""llm_model"""):
_A = key.replace("""llm_model""" , """language_model""")
if key.startswith("""t5"""):
_A = key.replace("""t5""" , """language""")
_A = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__)
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(snake_case__ , strict=snake_case__)
_A = load_demo_image()
_A = """What is unusual about this image?"""
# create processor
_A = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__)
_A = InstructBlipProcessor(
image_processor=snake_case__ , tokenizer=snake_case__ , qformer_tokenizer=snake_case__ , )
_A = processor(images=snake_case__ , text=snake_case__ , return_tensors="""pt""").to(snake_case__)
# make sure processor creates exact same pixel values
_A = vis_processors["""eval"""](snake_case__).unsqueeze(0).to(snake_case__)
_A = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device) , snake_case__)
original_model.to(snake_case__)
hf_model.to(snake_case__)
with torch.no_grad():
if "vicuna" in model_name:
_A = original_model({"""image""": original_pixel_values, """text_input""": [prompt]}).logits
_A = hf_model(**snake_case__).logits
else:
_A = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]}).logits
_A = tokenizer("""\n""" , return_tensors="""pt""").input_ids.to(snake_case__)
_A = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100)
_A = hf_model(**snake_case__ , labels=snake_case__).logits
print("""First values of original logits:""" , original_logits[0, :3, :3])
print("""First values of HF logits:""" , logits[0, :3, :3])
# assert values
assert original_logits.shape == logits.shape
_A = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device) , snake_case__ , atol=snake_case__)
print("""Looks ok!""")
print("""Generating with original model...""")
_A = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5)
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""")
_A = hf_model.generate(
**snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_A = 2
print("""Original generation:""" , snake_case__)
_A = processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__)
_A = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__)
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__)
hf_model.save_pretrained(snake_case__)
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''')
hf_model.push_to_hub(F'''Salesforce/{model_name}''')
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
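    # --- Added summary (comments only): the rename loop above follows the usual
    # pop-rewrite-reinsert pattern for converting checkpoints, e.g. a hypothetical key
    #   "Qformer.bert.encoder.layer.0.attention.self.query.weight"
    # is rewritten to
    #   "qformer.encoder.layer.0.attention.attention.query.weight".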
| 83 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start`` and record even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
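    # --- Added independent cross-check: an edge above vertex v can be cut
    # exactly when v's subtree has even size, so the answer equals the number
    # of non-root vertices with even subtree size.
    sizes: dict[int, int] = {}

    def subtree_size(node: int, parent: int) -> int:
        total = 1
        for nxt in tree[node]:
            if nxt != parent:
                total += subtree_size(nxt, node)
        sizes[node] = total
        return total

    subtree_size(1, 0)
    print(sum(1 for v, s in sizes.items() if v != 1 and s % 2 == 0))  # 2 again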
| 83 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt'}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
_SCREAMING_SNAKE_CASE = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
_SCREAMING_SNAKE_CASE = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :int = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :List[Any] = ConvBertTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Dict:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_A = getattr(lowerCAmelCase_ , normalizer_state.pop("""type""" ) )
_A = do_lower_case
_A = strip_accents
_A = tokenize_chinese_chars
_A = normalizer_class(**lowerCAmelCase_ )
_A = do_lower_case
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]:
_A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
_A = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
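# --- Added illustration of the token-type-id layout produced above (pure
# Python, no tokenizer download; all ids made up): sentence A plus [CLS]/[SEP]
# maps to 0s, sentence B plus its [SEP] maps to 1s.
# _cls, _sep = [101], [102]
# _ids_a, _ids_b = [7, 8, 9], [11, 12]
# len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1]
# -> [0, 0, 0, 0, 0, 1, 1, 1]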
| 83 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy approximation of a minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list = []
    # for each node and its adjacency list, add them and the node's rank to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq implements a min-priority queue, so we push -1 * len(v) to pop max rank first
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
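    # --- Added illustration of the negated-rank trick used above: heapq is a
    # min-heap, so pushing -1 * len(adjacency) makes the highest-degree vertex
    # pop first.
    demo_graph = {0: [1, 3], 1: [0], 3: [0]}
    demo_queue: list = []
    for node, adjacent in demo_graph.items():
        heapq.heappush(demo_queue, [-1 * len(adjacent), (node, adjacent)])
    print(heapq.heappop(demo_queue))  # [-2, (0, [1, 3])] -- max degree first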
| 83 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
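# --- Added note: every integer is 6k + r with r in {0, ..., 5}; r in
# {0, 2, 3, 4} gives divisibility by 2 or 3, so any prime > 3 must be of the
# form 6k - 1 or 6k + 1. That is why the loop above only trials i and i + 2
# for i = 5, 11, 17, ... up to sqrt(number). A brute-force cross-check:
def is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, n))


assert all(is_prime(n) == is_prime_naive(n) for n in range(500))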
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
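# --- Added standalone sketch of the ``from_generator`` pattern used in the
# dataset builder above (assumption: int64 stands in for the mangled ``tf.intaa``):
def _demo_gen():
    yield {"input_ids": [1, 2, 3]}, 0
    yield {"input_ids": [4, 5]}, 1


_demo_ds = tf.data.Dataset.from_generator(
    _demo_gen,
    ({"input_ids": tf.int64}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)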
| 83 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
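# --- Added toy illustration (independent of the real _LazyModule): attribute
# access triggers the actual import, so importing the package itself stays cheap.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, item):
        if item in self._attr_to_module:
            return importlib.import_module(self._attr_to_module[item])
        raise AttributeError(item)


assert _ToyLazyModule("demo", {"json_mod": "json"}).json_mod.dumps({"a": 1}) == '{"a": 1}'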
| 83 | 1 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below ``max_number``."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below ``max_number`` with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
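    # --- Added small-bound check: the semiprimes below 30 are
    # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten of them.
    print(solution(30))  # 10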
| 83 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = {"""image""": image, """question""": question}
else:
_A = image
_A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any:
_A = load_image(inputs["""image"""] )
_A = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase_ )
return model_inputs
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = self.model(**lowerCAmelCase_ )
return model_outputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
_A = self.model.config.num_labels
if self.framework == "pt":
_A = model_outputs.logits.sigmoid()[0]
_A , _A = probs.topk(lowerCAmelCase_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_A = scores.tolist()
_A = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
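# --- Added usage sketch (assumption: network access and a VQA checkpoint such
# as "dandelin/vilt-b32-finetuned-vqa"; the image path is hypothetical):
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="photo.png", question="How many cats are there?", top_k=2)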
| 83 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''timm_backbone'''
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
_A = backbone
_A = num_channels
_A = features_only
_A = use_pretrained_backbone
_A = True
_A = out_indices if out_indices is not None else (-1,)
| 83 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
| 83 | 1 |
import torch
from transformers import AutoModel
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_="sayef/fsner-bert-base-uncased" ) -> str:
super(lowerCAmelCase_ , self ).__init__()
_A = AutoModel.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_A = torch.nn.CosineSimilarity(3 , 1E-08 )
_A = torch.nn.Softmax(dim=1 )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
return self.bert(**lowerCAmelCase_ ).last_hidden_state
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
return token_embeddings.sum(2 , keepdim=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Union[str, Any]:
return self.softmax(T * self.cos(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = W_supports["""sizes"""].tolist()
_A = W_supports["""start_token_id"""].item()
_A = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_A = self.BERT(**lowerCAmelCase_ )
_A = self.BERT(**lowerCAmelCase_ )
_A = None
_A = None
_A = W_supports["""input_ids"""] == start_token_id
_A = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(lowerCAmelCase_ ):
if i == 0:
_A = 0
else:
_A = support_sizes[i - 1]
_A = S[s : s + size][start_token_masks[s : s + size]]
_A = S[s : s + size][end_token_masks[s : s + size]]
_A = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_A = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_A = torch.vstack((p_starts, p_start) )
_A = torch.vstack((p_ends, p_end) )
else:
_A = p_start
_A = p_end
return p_starts, p_ends
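# --- Added shape sketch of the similarity head above (random tensors, no
# checkpoint needed): cosine similarity along the hidden dimension, then a
# softmax over the support axis.
# q = torch.randn(2, 1, 5, 8)                       # (batch, 1, tokens, hidden)
# s = torch.randn(2, 7, 5, 8)                       # (batch, supports, tokens, hidden)
# sim = torch.nn.CosineSimilarity(3, 1e-08)(q, s)   # -> shape (2, 7, 5)
# torch.nn.Softmax(dim=1)(sim)                      # distribution over supports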
| 83 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''speech_to_text'''
lowerCamelCase :List[str] = ['''past_key_values''']
lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
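# --- Added usage sketch against the public upstream name (assumption: the
# class above corresponds to transformers' Speech2TextConfig):
# from transformers import Speech2TextConfig
# cfg = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # OK
# Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))  # ValueError above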
| 83 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[str] = '''camembert'''
def __init__( self , lowerCAmelCase_=3_05_22 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> int:
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
class a ( __lowerCAmelCase ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 83 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x_a = x_start
    fx_a = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x_b = (x_end - x_start) / steps + x_a
        fx_b = fnc(x_b)
        area += abs(fx_b + fx_a) * (x_b - x_a) / 2
        # Increment the step
        x_a = x_b
        fx_a = fx_b
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
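    # --- Added cross-check with a closed form: x**2 over [0, 3] integrates to
    # exactly 9, and the trapezoidal sum converges to it.
    print(trapezoidal_area(lambda x: x * x, 0, 3, 100_000))  # ~9.0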
| 83 | 1 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
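    # --- Added usage sketch for the helpers above (menu data is made up):
    menu = build_menu(["burger", "pizza", "cola"], [80, 100, 60], [40, 60, 20])
    chosen, total_value = greedy(menu, 60, Things.get_value)
    print(chosen, total_value)  # [Things(pizza, 100, 60)] 100.0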
| 83 | import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    # Set up a reproducible source of randomness.
    rng = np.random.default_rng(seed=seed)
    # Only qubits measured in matching bases contribute to the key -- on
    # average about half -- so we prepare more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum circuit to simulate BB84.
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to the rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to the rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # The measurement results as a bit string.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extract the generated key from the simulation results:
    # only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
        if alice_basis_bit == bob_basis_bit
    )
    # Get the final key: pad with 0 if too short, otherwise truncate.
    return gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
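    # --- Added classical-only illustration of basis sifting: keep positions
    # where the two random bases agree; about half survive on average.
    demo_rng = np.random.default_rng(0)
    alice = demo_rng.integers(2, size=16)
    bob = demo_rng.integers(2, size=16)
    print(f"{int((alice == bob).sum())}/16 positions survive basis sifting")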
| 83 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''dpt'''
def __init__( self , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=3_84 , lowerCAmelCase_=16 , lowerCAmelCase_=3 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=[2, 5, 8, 11] , lowerCAmelCase_="project" , lowerCAmelCase_=[4, 2, 1, 0.5] , lowerCAmelCase_=[96, 1_92, 3_84, 7_68] , lowerCAmelCase_=2_56 , lowerCAmelCase_=-1 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0.4 , lowerCAmelCase_=2_55 , lowerCAmelCase_=0.1 , lowerCAmelCase_=[1, 10_24, 24, 24] , lowerCAmelCase_=[0, 1] , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
_A = hidden_size
_A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
_A = BitConfig(**lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A = BitConfig(**lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_A = backbone_featmap_shape
_A = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
_A = None
_A = None
_A = []
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
_A = readout_type
_A = reassemble_factors
_A = neck_hidden_sizes
_A = fusion_hidden_size
_A = head_in_index
_A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_A = use_auxiliary_head
_A = auxiliary_loss_weight
_A = semantic_loss_ignore_index
_A = semantic_classifier_dropout
def UpperCAmelCase ( self ) -> Dict:
_A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_A = self.backbone_config.to_dict()
_A = self.__class__.model_type
return output
| 83 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_SCREAMING_SNAKE_CASE = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_SCREAMING_SNAKE_CASE = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
if self.config_name == "default":
_A = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
_A = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False ) -> Optional[Any]:
if gpus is None:
_A = 1 if torch.cuda.is_available() else 0
_A = {"""src""": sources, """mt""": predictions, """ref""": references}
_A = [dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) for t in zip(*data.values() )]
_A , _A = self.scorer.predict(lowerCAmelCase_ , gpus=lowerCAmelCase_ , progress_bar=lowerCAmelCase_ )
return {"mean_score": mean_score, "scores": scores}
| 83 | import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for every complex number with absolute value
        # greater than 2, i.e. whenever a * a + b * b > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
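    # --- Added spot checks: the origin never escapes, so its normalized
    # distance is 1.0; c = 1 diverges after one step.
    print(get_distance(0, 0, 50))  # 1.0
    print(get_distance(1, 0, 50))  # 1/49 ~= 0.0204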
| 83 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=2 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=None , lowerCAmelCase_=2 , lowerCAmelCase_=2 , ) -> List[str]:
_A = parent
_A = batch_size
_A = patch_size
_A = max_length
_A = num_mel_bins
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = scope
_A = frequency_stride
_A = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_A = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_A = (self.max_length - self.patch_size) // self.time_stride + 1
_A = frequency_out_dimension * time_out_dimension
_A = num_patches + 2
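# Worked example (illustrative, with the defaults above): num_mel_bins=16,
# max_length=24, patch_size=2 and both strides 2 give
# frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
# time_out_dimension = (24 - 2) // 2 + 1 = 12, hence num_patches = 96
# and seq_length = 98.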
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self ) -> Tuple:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = ASTModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase :Dict = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase :Dict = False
lowerCamelCase :str = False
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :List[str] = False
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = ASTModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Any:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Any:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""input_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = ASTModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ) -> List[Any]:
_A = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""")
_A , _A = torchaudio.load(snake_case__)
return audio, sampling_rate
@require_torch
@require_torchaudio
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> List[str]:
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> List[str]:
_A = self.default_feature_extractor
_A = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(lowerCAmelCase_ )
_A = self.default_feature_extractor
_A , _A = prepare_audio()
_A = audio.squeeze().numpy()
_A = feature_extractor(lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A = model(**lowerCAmelCase_ )
# verify the logits
_A = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 83 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite were added by @andreasvc.\nParsing CoNLL files was developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
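# Illustrative note (not part of the original snippet): conll_score is the
# arithmetic mean of the MUC, B-cubed and CEAFe F1 values scaled to 0-100;
# the mentions and LEA metrics are reported but do not enter that average.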
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
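# Illustrative note (not part of the original snippet): line.split()[5] reads the
# parse-bit column of a CoNLL row; e.g. in
# "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 ..." the sixth field
# is "(TOP(S(VP*", so a gold parse is present, whereas "-" means no parse annotation.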
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[int] , snake_case__ :List[Any]) -> Any:
_A = UniSpeechSatForSequenceClassification.from_pretrained(snake_case__ , config=snake_case__)
_A = downstream_dict["""projector.weight"""]
_A = downstream_dict["""projector.bias"""]
_A = downstream_dict["""model.post_net.linear.weight"""]
_A = downstream_dict["""model.post_net.linear.bias"""]
return model
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Union[str, Any] , snake_case__ :Any) -> Optional[Any]:
_A = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case__ , config=snake_case__)
_A = downstream_dict["""model.linear.weight"""]
_A = downstream_dict["""model.linear.bias"""]
return model
def snake_case ( snake_case__ :Any , snake_case__ :Optional[Any] , snake_case__ :Union[str, Any]) -> Optional[int]:
_A = UniSpeechSatForXVector.from_pretrained(snake_case__ , config=snake_case__)
_A = downstream_dict["""connector.weight"""]
_A = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel):
_A = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_A = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_A = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
_A = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
_A = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
_A = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
_A = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def snake_case ( snake_case__ :str , snake_case__ :int , snake_case__ :Dict , snake_case__ :Any) -> Tuple:
_A = torch.load(snake_case__ , map_location="""cpu""")
_A = checkpoint["""Downstream"""]
_A = UniSpeechSatConfig.from_pretrained(snake_case__)
_A = WavaVecaFeatureExtractor.from_pretrained(
snake_case__ , return_attention_mask=snake_case__ , do_normalize=snake_case__)
_A = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification"""):
_A = convert_classification(snake_case__ , snake_case__ , snake_case__)
elif arch.endswith("""ForAudioFrameClassification"""):
_A = convert_diarization(snake_case__ , snake_case__ , snake_case__)
elif arch.endswith("""ForXVector"""):
_A = convert_xvector(snake_case__ , snake_case__ , snake_case__)
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''')
if hf_config.use_weighted_layer_sum:
_A = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(snake_case__)
hf_model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 83 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
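# Worked example (illustrative): for the symbol tuple ("h", "e", "l", "l", "o")
# the function returns the adjacent-pair set consumed by the BPE merge loop below:
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.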
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
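# Minimal usage sketch (hypothetical names and file paths; not part of the original snippet):
# tok = BlenderbotSmallTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
# tok.tokenize("hello world") # BPE subwords such as ["hel@@", "lo", "world"];
# the exact split depends on the merges learned in merges.txt.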
| 83 | 1 |
from importlib import import_module
from .logging import get_logger
_SCREAMING_SNAKE_CASE = get_logger(__name__)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]:
_A = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A = module._original_module if isinstance(lowerCAmelCase_ , _PatchedModuleObj ) else module
class a :
"""simple docstring"""
lowerCamelCase :Optional[Any] = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]:
_A = obj
_A = target
_A = new
_A = target.split(""".""" )[0]
_A = {}
_A = attrs or []
def __enter__( self ) -> Optional[int]:
*_A , _A = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase_ ) ):
try:
_A = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_A = getattr(self.obj , lowerCAmelCase_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_A = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase_ , _PatchedModuleObj(lowerCAmelCase_ , attrs=self.attrs ) )
_A = getattr(self.obj , lowerCAmelCase_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , _PatchedModuleObj(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , attrs=self.attrs ) )
_A = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
# finally set the target attribute
setattr(lowerCAmelCase_ , lowerCAmelCase_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_A = getattr(import_module(""".""".join(lowerCAmelCase_ ) ) , lowerCAmelCase_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase_ ) is attr_value:
_A = getattr(self.obj , lowerCAmelCase_ )
setattr(self.obj , lowerCAmelCase_ , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
_A = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowerCAmelCase_ , self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *lowerCAmelCase_ ) -> int:
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase_ , self.original.pop(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Tuple:
self.__enter__()
self._active_patches.append(self )
def UpperCAmelCase ( self ) -> Any:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
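# Minimal usage sketch (hypothetical names; not part of the original snippet):
# with patch_submodule(some_module, "os.path.join", fake_join):
#     ... # inside the block, some_module sees fake_join wherever it used os.path.join
# The last two methods register the patch persistently and undo it later
# without a context manager.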
| 83 | _SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
| 83 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Tuple = StableUnCLIPPipeline
lowerCamelCase :Tuple = TEXT_TO_IMAGE_PARAMS
lowerCamelCase :Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCamelCase :Dict = False
def UpperCAmelCase ( self ) -> str:
_A = 32
_A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
_A = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_A = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ )
_A = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0 )
_A = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
_A = AutoencoderKL()
_A = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> int:
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
_A = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe("""anime turle""" , generator=lowerCAmelCase_ , output_type="""np""" )
_A = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_A = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 83 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
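# Illustrative note (not part of the original snippet): _LazyModule swaps this
# module object in sys.modules so the torch-dependent imports declared in
# _import_structure only execute when an attribute such as JukeboxModel is first
# accessed; the TYPE_CHECKING branch keeps the real imports visible to static
# analyzers.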
| 83 | 1 |
def snake_case ( snake_case__ :str , snake_case__ :str) -> str:
_A = len(snake_case__)
_A = len(snake_case__)
_A = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_A = []
for char_count in range(snake_case__):
if char_count < first_str_length:
output_list.append(first_str[char_count])
if char_count < second_str_length:
output_list.append(second_str[char_count])
return "".join(snake_case__)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 83 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
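# Minimal usage sketch (hypothetical class name; not part of the original snippet):
# tool = TextSummarizationTool()
# summary = tool("A long English text to condense ...")
# The three methods above fill in the PipelineTool encode/forward/decode hooks.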
| 83 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
_A = SMALL_MODEL_IDENTIFIER
_A = """pt"""
_A = """tf"""
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_A = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCAmelCase_ )
model_tf.save_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = """mock_framework"""
# Framework provided - return whatever the user provides
_A = FeaturesManager.determine_framework(self.test_model , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
_A = MagicMock(return_value=lowerCAmelCase_ )
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# Both not in environment -> raise error
_A = MagicMock(return_value=lowerCAmelCase_ )
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
with self.assertRaises(lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
| 83 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
_SCREAMING_SNAKE_CASE = TypeVar('T')
_SCREAMING_SNAKE_CASE = Union[List[T], Tuple[T, ...]]
_SCREAMING_SNAKE_CASE = Union[T, List[T], Dict[str, T]]
_SCREAMING_SNAKE_CASE = Union[str, bytes, os.PathLike]
| 83 | def snake_case ( snake_case__ :int = 1_000_000) -> int:
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , snake_case__)))
_A = [float(snake_case__) for n in range(limit + 1)]
for p in primes:
for n in range(snake_case__ , limit + 1 , snake_case__):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
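# Worked example (illustrative): with limit = 10 the sieve leaves
# phi[2..10] == [1, 2, 2, 4, 2, 6, 4, 6, 4], so the function returns 31,
# the number of reduced proper fractions n/d with d <= 10 (a totient summation).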
if __name__ == "__main__":
print(F'''{solution() = }''')
| 83 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = AutoencoderKL
lowerCamelCase :Union[str, Any] = '''sample'''
lowerCamelCase :int = 1E-2
@property
def UpperCAmelCase ( self ) -> Any:
_A = 4
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase_ )
return {"sample": image}
@property
def UpperCAmelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def UpperCAmelCase ( self ) -> int:
return (3, 32, 32)
def UpperCAmelCase ( self ) -> Any:
_A = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
_A = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def UpperCAmelCase ( self ) -> Optional[int]:
# enable deterministic behavior for gradient checkpointing
_A , _A = self.prepare_init_args_and_inputs_for_common()
_A = self.model_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
assert not model.is_gradient_checkpointing and model.training
_A = model(**lowerCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_A = torch.randn_like(lowerCAmelCase_ )
_A = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_A = self.model_class(**lowerCAmelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCAmelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_A = model_a(**lowerCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_A = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_A = dict(model.named_parameters() )
_A = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
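# Illustrative note (not part of the original snippet): gradient checkpointing
# recomputes activations during the backward pass instead of storing them, so it
# must not change the results; the assertions above check that the loss and every
# parameter gradient match between the plain and checkpointed models within
# small tolerances.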
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A , _A = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCAmelCase_ )
_A = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase ( self ) -> Optional[int]:
_A = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
_A = model.to(lowerCAmelCase_ )
model.eval()
if torch_device == "mps":
_A = torch.manual_seed(0 )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_A = image.to(lowerCAmelCase_ )
with torch.no_grad():
_A = model(lowerCAmelCase_ , sample_posterior=lowerCAmelCase_ , generator=lowerCAmelCase_ ).sample
_A = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_A = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_A = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_A = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-2 ) )
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            # torch.Generator does not support the MPS device here, so seed the global generator
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = self.get_sd_vae_model()
_A = self.get_sd_image(lowerCAmelCase_ )
_A = self.get_generator(lowerCAmelCase_ )
with torch.no_grad():
_A = model(lowerCAmelCase_ , generator=lowerCAmelCase_ , sample_posterior=lowerCAmelCase_ ).sample
assert sample.shape == image.shape
_A = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_A = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = self.get_sd_vae_model(fpaa=lowerCAmelCase_ )
_A = self.get_sd_image(lowerCAmelCase_ , fpaa=lowerCAmelCase_ )
_A = self.get_generator(lowerCAmelCase_ )
with torch.no_grad():
_A = model(lowerCAmelCase_ , generator=lowerCAmelCase_ , sample_posterior=lowerCAmelCase_ ).sample
assert sample.shape == image.shape
_A = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_A = torch.tensor(lowerCAmelCase_ )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.get_sd_vae_model()
_A = self.get_sd_image(lowerCAmelCase_ )
with torch.no_grad():
_A = model(lowerCAmelCase_ ).sample
assert sample.shape == image.shape
_A = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_A = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.get_sd_vae_model()
_A = self.get_sd_image(lowerCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
assert list(sample.shape) == [3, 3, 512, 512]
_A = sample[-1, -2:, :2, -2:].flatten().cpu()
_A = torch.tensor(lowerCAmelCase_ )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = self.get_sd_vae_model(fpaa=lowerCAmelCase_ )
_A = self.get_sd_image(lowerCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase_ )
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
assert list(sample.shape) == [3, 3, 512, 512]
_A = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_A = torch.tensor(lowerCAmelCase_ )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = self.get_sd_vae_model(fpaa=lowerCAmelCase_ )
_A = self.get_sd_image(lowerCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase_ )
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = self.get_sd_vae_model()
_A = self.get_sd_image(lowerCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_A = model.decode(lowerCAmelCase_ ).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = self.get_sd_vae_model()
_A = self.get_sd_image(lowerCAmelCase_ )
_A = self.get_generator(lowerCAmelCase_ )
with torch.no_grad():
_A = model.encode(lowerCAmelCase_ ).latent_dist
_A = dist.sample(generator=lowerCAmelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_A = sample[0, -1, -3:, -3:].flatten().cpu()
_A = torch.tensor(lowerCAmelCase_ )
_A = 3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=lowerCAmelCase_ )
| 83 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=32 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=[10, 20, 30, 40] , lowerCAmelCase_=[2, 2, 3, 2] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=["stage2", "stage3", "stage4"] , lowerCAmelCase_=[2, 3, 4] , lowerCAmelCase_=None , ) -> Optional[int]:
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = num_stages
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = intermediate_size
_A = hidden_act
_A = num_labels
_A = initializer_range
_A = out_features
_A = out_indices
_A = scope
def UpperCAmelCase ( self ) -> Tuple:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> str:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = ConvNextModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = ConvNextForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = ConvNextBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A = None
_A = ConvNextBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase :Any = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase :List[str] = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = ConvNextModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ) -> Dict:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self ) -> Tuple:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> List[str]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = ConvNextModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(lowerCAmelCase_ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A = model(**lowerCAmelCase_ )
# verify the logits
_A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@require_torch
class a ( unittest.TestCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
lowerCamelCase :Any = ConvNextConfig
lowerCamelCase :Union[str, Any] = False
def UpperCAmelCase ( self ) -> Dict:
_A = ConvNextModelTester(self )
| 83 | def nor_gate(input_1: int, input_2: int) -> int:
    # NOR is true only when both inputs are 0
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
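
# Typical invocations of the CLI assembled above, one per registered
# subcommand (the exact flag surface lives in the imported *_command_parser
# helpers, so treat these as illustrative):
#
#   accelerate config            # interactive config file writer
#   accelerate env               # report the current environment
#   accelerate launch train.py   # run a script under the saved config
#   accelerate tpu               # TPU-specific utilities
#   accelerate test              # sanity-check the saved config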
| 83 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    # columns 0 and 2 are x coordinates, 1 and 3 are y coordinates
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
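
# Hand-rolled usage sketch for the two helpers above (all values illustrative):
# detector boxes are (x0, y0, x1, y1); _scale_box maps them from the resized
# resolution back to the original one, and _clip_box clamps them in place.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    scales_yx = torch.tensor([[0.5, 0.5]])  # (scale_y, scale_x) for one image
    boxes = _scale_box(boxes, scales_yx)
    _clip_box(boxes, (200, 300))  # clamp into a 200 x 300 (h, w) image
    print(boxes)  # tensor([[  5.,  10., 300., 200.]])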
| 83 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    Distributed retriever: only the main worker loads the index into memory;
    workers exchange queries and retrieval results through a gloo process group.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 83 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
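
# Why the count above is correct (a sketch, hand-traced on the sample data):
# root the tree at node 1; an edge to a child v is removable exactly when the
# subtree under v has an even size, because both resulting components are then
# even. dfs() returns each subtree's size and records every even one in
# `cuts`; the root's whole-tree entry is not a removable edge, hence
# `len(cuts) - 1`. For the edge list above the even subtrees hang off nodes 3
# (size 2) and 6 (size 4), so the script prints 2.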
| 83 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert'
_SCREAMING_SNAKE_CASE = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
_SCREAMING_SNAKE_CASE = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
_A = cached_file(lowerCAmelCase_ , lowerCAmelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
with open(os.path.join(lowerCAmelCase_ , """refs""" , """main""" ) ) as f:
_A = f.read()
self.assertEqual(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """snapshots""" , lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(os.path.isfile(lowerCAmelCase_ ) )
# File is cached at the same place the second time.
_A = cached_file(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Using a specific revision to test the full commit hash.
_A = cached_file(lowerCAmelCase_ , lowerCAmelCase_ , revision="""9b8c223""" )
self.assertEqual(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """snapshots""" , lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(lowerCAmelCase_ , """is not a valid model identifier""" ):
_A = cached_file("""tiny-random-bert""" , lowerCAmelCase_ )
with self.assertRaisesRegex(lowerCAmelCase_ , """is not a valid git identifier""" ):
_A = cached_file(lowerCAmelCase_ , lowerCAmelCase_ , revision="""aaaa""" )
with self.assertRaisesRegex(lowerCAmelCase_ , """does not appear to have a file named""" ):
_A = cached_file(lowerCAmelCase_ , """conf""" )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaisesRegex(lowerCAmelCase_ , """does not appear to have a file named""" ):
_A = cached_file(lowerCAmelCase_ , """conf""" )
with open(os.path.join(lowerCAmelCase_ , """refs""" , """main""" ) ) as f:
_A = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , """.no_exist""" , lowerCAmelCase_ , """conf""" ) ) )
_A = cached_file(lowerCAmelCase_ , """conf""" , _raise_exceptions_for_missing_entries=lowerCAmelCase_ )
self.assertIsNone(lowerCAmelCase_ )
_A = cached_file(lowerCAmelCase_ , """conf""" , local_files_only=lowerCAmelCase_ , _raise_exceptions_for_missing_entries=lowerCAmelCase_ )
self.assertIsNone(lowerCAmelCase_ )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file("hf-internal-testing/tiny-random-bert", "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # Check that the fake head request was indeed called
            mock_head.assert_called()
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , lowerCAmelCase_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , lowerCAmelCase_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Tuple:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCAmelCase_ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , lowerCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCAmelCase_ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , lowerCAmelCase_ , revision="""ahaha""" )
_A = get_file_from_repo("""bert-base-cased""" , lowerCAmelCase_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
_A = json.loads(open(lowerCAmelCase_ , """r""" ).read() )
self.assertEqual(config["hidden_size"], 768 )
def UpperCAmelCase ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
_A = Path(lowerCAmelCase_ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(lowerCAmelCase_ , """a.txt""" ) , str(lowerCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(lowerCAmelCase_ , """b.txt""" ) )
| 83 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no remaining adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
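
# Hand-traced run for the sample graph above: the heap pops vertices in the
# order 2, 0, 1, 4 before every remaining rank hits 0, so the script prints
# {0, 1, 2, 4}. Keep in mind this max-degree strategy is a greedy
# approximation; it is not guaranteed to return a minimum vertex cover on
# every input.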
| 83 | 1 |
_SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 83 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1: any integer is
    # 6k + r with r in {0..5}, and 6k, 6k + 2, 6k + 3, 6k + 4 are divisible
    # by 2 or 3, so only 6k + 1 and 6k + 5 can be prime.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 1 |
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Shuffle lists of the same length together, so parallel data sources stay aligned
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
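
# Quick usage sketch for the helpers above (values illustrative): with 4
# shards and at most 3 jobs, _distribute_shards packs them as 2 + 1 + 1, and
# only list-valued kwargs are split while scalars are copied to every job.
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "batch_size": 16}
    jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=3)
    print([job["files"] for job in jobs])  # [['a.txt', 'b.txt'], ['c.txt'], ['d.txt']]
    assert _merge_gen_kwargs(jobs) == gen_kwargs  # merging inverts the split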
| 83 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
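
# Illustrative round-trip with the config above (defaults shown are the ones
# in the signature, not tied to any particular released checkpoint):
if __name__ == "__main__":
    config = FalconConfig()  # hidden_size=4544, num_attention_heads=71
    print(config.head_dim)  # 64, i.e. 4544 // 71
    print(config.rotary)    # True, because alibi defaults to False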
| 83 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # also supports {"image": ..., "question": ...} dicts, lists of them,
            # generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['MobileViTFeatureExtractor']
_SCREAMING_SNAKE_CASE = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=files)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda example: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="""max_length""") , batched=True , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda example: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="""max_length""" , ) , batched=True , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
gen_train , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
gen_val , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
gen_test , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fp16}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(p :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
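# --- Editor's addition: a hedged sketch of the tf.data.Dataset.from_generator
# pattern used by get_tfds above, with fake tokenized rows. Assumes
# TensorFlow 2.x; the feature name and values are illustrative only.
import tensorflow as tf

def toy_gen():
    yield ({"input_ids": [101, 7592, 102]}, 1)
    yield ({"input_ids": [101, 2088, 102]}, 0)

toy_ds = tf.data.Dataset.from_generator(
    toy_gen,
    ({"input_ids": tf.int64}, tf.int64),  # output types: (features dict, label)
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),  # output shapes
)
for features, label in toy_ds:
    print(features["input_ids"].numpy(), int(label))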
| 83 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> Any:
_A = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_A = len(lowerCAmelCase_ ) - 1
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , lowerCAmelCase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCAmelCase_ ) , 5 ) == 1
return output_values
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A = self.basis_function(lowerCAmelCase_ )
_A = 0.0
_A = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase ( self , lowerCAmelCase_ = 0.01 ) -> str:
from matplotlib import pyplot as plt # type: ignore
_A = [] # x coordinates of points to plot
_A = [] # y coordinates of points to plot
_A = 0.0
while t <= 1:
_A = self.bezier_curve_function(lowerCAmelCase_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_A = [i[0] for i in self.list_of_points]
_A = [i[1] for i in self.list_of_points]
plt.plot(
lowerCAmelCase_ , lowerCAmelCase_ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(lowerCAmelCase_ , lowerCAmelCase_ , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
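# --- Editor's addition: a quick hand check of the Bernstein basis used by
# basis_function above, for a degree-2 curve at t = 0.5. The three values
# C(2, i) * (1 - t)**(2 - i) * t**i are 0.25, 0.5, 0.25 and sum to 1, which is
# exactly the partition-of-unity property the assert enforces.
from scipy.special import comb

t, degree = 0.5, 2
basis = [comb(degree, i) * ((1 - t) ** (degree - i)) * (t ** i) for i in range(degree + 1)]
print(basis)       # [0.25, 0.5, 0.25]
print(sum(basis))  # 1.0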
| 83 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''speech_to_text'''
lowerCamelCase :List[str] = ['''past_key_values''']
lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
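# --- Editor's addition: a hedged sketch of the consistency check enforced in
# the constructor above, assuming transformers is installed. A matching
# (num_conv_layers, conv_kernel_sizes) pair is accepted; a mismatch raises.
from transformers import Speech2TextConfig

ok = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # consistent
try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))   # inconsistent
except ValueError as err:
    print("rejected:", err)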
| 83 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Optional[int]) -> List[Any]:
_A = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_A = [144, 192, 240]
_A = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_A = [96, 120, 144]
_A = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_A = [64, 80, 96]
_A = [16, 16, 24, 48, 64, 80, 320]
_A = 0.05
_A = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
_A = 512
_A = 16
_A = 21
_A = """pascal-voc-id2label.json"""
else:
_A = 1_000
_A = """imagenet-1k-id2label.json"""
_A = """huggingface/label-files"""
_A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""") , """r"""))
_A = {int(k): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def snake_case ( snake_case__ :Any , snake_case__ :Optional[Any]=False) -> List[Any]:
for i in range(1 , 6):
if F'''layer_{i}.''' in name:
_A = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''')
if "conv_1." in name:
_A = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
_A = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
_A = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
_A = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
_A = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
_A = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
_A = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
_A = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
_A = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''')
for i in range(2 , 6):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''')
if "expand_1x1" in name:
_A = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
_A = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
_A = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'''.global_rep.{i}.weight''' in name:
_A = name.replace(F'''.global_rep.{i}.weight''' , """.layernorm.weight""")
if F'''.global_rep.{i}.bias''' in name:
_A = name.replace(F'''.global_rep.{i}.bias''' , """.layernorm.bias""")
if ".global_rep." in name:
_A = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
_A = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
_A = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
_A = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
_A = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
_A = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
_A = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
_A = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
_A = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
_A = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
_A = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
_A = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
_A = """mobilevit.""" + name
return name
def snake_case ( snake_case__ :Tuple , snake_case__ :int , snake_case__ :Union[str, Any]=False) -> Optional[Any]:
if base_model:
_A = """"""
else:
_A = """mobilevit."""
for key in orig_state_dict.copy().keys():
_A = orig_state_dict.pop(key)
if key[:8] == "encoder.":
_A = key[8:]
if "qkv" in key:
_A = key.split(""".""")
_A = int(key_split[0][6:]) - 1
_A = int(key_split[3])
_A = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''')
_A = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_A = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
_A = val[:dim, :]
_A = val[dim : dim * 2, :]
_A = val[-dim:, :]
else:
_A = val[:dim]
_A = val[dim : dim * 2]
_A = val[-dim:]
else:
_A = val
return orig_state_dict
def snake_case ( ) -> Optional[Any]:
_A = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw)
return im
@torch.no_grad()
def snake_case ( snake_case__ :str , snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=False) -> Tuple:
_A = get_mobilevit_config(snake_case__)
# load original state_dict
_A = torch.load(snake_case__ , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
_A = MobileViTForSemanticSegmentation(snake_case__).eval()
else:
_A = MobileViTForImageClassification(snake_case__).eval()
_A = convert_state_dict(snake_case__ , snake_case__)
model.load_state_dict(snake_case__)
# Check outputs on an image, prepared by MobileViTImageProcessor
_A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
_A = image_processor(images=prepare_img() , return_tensors="""pt""")
_A = model(**snake_case__)
_A = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_A = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_A = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_A = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''')
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4)
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_A = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
_A = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
_A = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''')
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1E-4)
Path(snake_case__).mkdir(exist_ok=snake_case__)
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(snake_case__)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(snake_case__)
if push_to_hub:
_A = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
_A = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__ , organization="""apple""")
model.push_to_hub(snake_case__ , organization="""apple""")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
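# --- Editor's addition: a hedged, minimal sketch of the fused-QKV slicing
# performed by read_in_q_k_v above, on a dummy weight with hidden_size = 4.
# Rows [0, h), [h, 2h) and [2h, 3h) become the query, key and value weights.
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv[:hidden, :]
k = qkv[hidden : hidden * 2, :]
v = qkv[-hidden:, :]
assert torch.equal(torch.cat([q, k, v]), qkv)  # the three slices tile the original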
| 83 | from __future__ import annotations
from collections.abc import Callable
def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
_A = x_start
_A = fnc(x_start)
_A = 0.0
for _ in range(steps):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_A = (x_end - x_start) / steps + xa
_A = fnc(xa_next)
area += abs(fxa_next + fxa) * (xa_next - xa) / 2
# Increment step
_A = xa_next
_A = fxa_next
return area
if __name__ == "__main__":
def snake_case ( snake_case__ :Tuple) -> List[str]:
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_SCREAMING_SNAKE_CASE = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
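# --- Editor's addition: a hedged check of the trapezoidal rule against an
# integral with a known closed form, the integral of x^2 from 0 to 1 = 1/3.
# The error shrinks quadratically with the step count.
def _trapezoid_check(fnc, a, b, steps):
    h = (b - a) / steps
    total = 0.0
    for i in range(steps):
        x0, x1 = a + i * h, a + (i + 1) * h
        total += (fnc(x0) + fnc(x1)) * (x1 - x0) / 2
    return total

for n in (10, 100, 1000):
    print(n, _trapezoid_check(lambda x: x * x, 0.0, 1.0, n))
# -> 0.335, 0.33335, 0.3333335 converging to 1/3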
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | import numpy as np
import qiskit
def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str:
_A = np.random.default_rng(seed=snake_case__)
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_A = 6 * key_len
# Measurement basis for Alice's qubits.
_A = rng.integers(2 , size=snake_case__)
# The set of states Alice will prepare.
_A = rng.integers(2 , size=snake_case__)
# Measurement basis for Bob's qubits.
_A = rng.integers(2 , size=snake_case__)
# Quantum Circuit to simulate BB84
_A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""")
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(alice_basis):
if alice_state[index] == 1:
bbaa_circ.x(index)
if alice_basis[index] == 1:
bbaa_circ.h(index)
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(bob_basis):
if bob_basis[index] == 1:
bbaa_circ.h(index)
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_A = qiskit.Aer.get_backend("""aer_simulator""")
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__)
# Returns the result of measurement.
_A = job.result().get_counts(bbaa_circ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_A = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case__ , snake_case__ , snake_case__)
if alice_basis_bit == bob_basis_bit
])
# Get final key. Pad with 0 if too short, otherwise truncate.
_A = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , """0""")
return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
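# --- Editor's addition: the key-sifting step of BB84 can be sanity-checked
# classically, without qiskit. With uniformly random bases, Alice and Bob pick
# the same basis for roughly half of the qubits, so oversampling by 6x (as
# above) comfortably yields an 8-bit key on a noise-free channel. A numpy-only
# sketch:
import numpy as np

rng = np.random.default_rng(seed=0)
n = 48
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
alice_bits = rng.integers(2, size=n)
sifted = alice_bits[alice_basis == bob_basis]  # bits where the bases agree
print(len(sifted), "".join(str(b) for b in sifted[:8]))  # ~n/2 kept; first 8 = key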
| 83 | 1 |
from __future__ import annotations
def snake_case ( snake_case__ :list[list[int]]) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0])):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(snake_case__)):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(snake_case__)):
for j in range(1 , len(matrix[0])):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1])
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
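# --- Editor's addition: a worked run of the in-place DP above on the classic
# 3x3 grid. Prefix sums fill the first row and column; every other cell adds
# the cheaper of its top or left neighbour. The optimal cost here is 7
# (path 1 -> 3 -> 1 -> 1 -> 1).
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
for i in range(1, 3):
    grid[0][i] += grid[0][i - 1]
    grid[i][0] += grid[i - 1][0]
for i in range(1, 3):
    for j in range(1, 3):
        grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])
print(grid[-1][-1])  # 7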
| 83 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(h , w )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(newh , neww ) > self.max_size:
_A = self.max_size * 1.0 / max(newh , neww )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uint8:
_A = Image.fromarray(img )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(pil_image )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda x : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(images , list ):
_A = [images]
if single_image:
assert len(images ) == 1
for i in range(len(images ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(i , images.pop(i ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(images )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
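# --- Editor's addition: the shortest-edge resize above scales the image so
# that min(h, w) hits the sampled size, then rescales if max(h, w) would
# exceed max_size. A hedged trace of that arithmetic for a 480x640 image with
# size = 384 and max_size = 1333 (illustrative numbers):
h, w, size, max_size = 480, 640, 384, 1333
scale = size * 1.0 / min(h, w)                      # 0.8
newh, neww = (size, scale * w) if h < w else (scale * h, size)
if max(newh, neww) > max_size:                      # not triggered here
    rescale = max_size * 1.0 / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale
print(int(newh + 0.5), int(neww + 0.5))             # 384 512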
| 83 | import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(max_step): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(image_width):
for image_y in range(image_height):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
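# --- Editor's addition: a hedged, standalone version of the escape-time count
# in get_distance above, checked on two hand-verifiable points: 0+0i never
# diverges (returns 1.0, drawn black), while 1+1i exceeds |z|^2 > 4 on the
# first iteration (returns 0.0).
def _escape(x, y, max_step=50):
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

print(_escape(0.0, 0.0))  # 1.0
print(_escape(1.0, 1.0))  # 0.0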
| 83 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> List[Any]:
_A = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias'''))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
])
return rename_keys
def snake_case ( snake_case__ :List[Any] , snake_case__ :List[Any]) -> List[Any]:
for i in range(encoder_config.num_hidden_layers):
# queries, keys and values (only weights, no biases)
_A = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''')
_A = in_proj_weight[
: encoder_config.hidden_size, :
]
_A = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_A = in_proj_weight[
-encoder_config.hidden_size :, :
]
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any] , snake_case__ :Any) -> Dict:
_A = dct.pop(snake_case__)
_A = val
def snake_case ( snake_case__ :Optional[int]) -> int:
if "handwritten" in checkpoint_url:
_A = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw).convert("""RGB""")
return im
@torch.no_grad()
def snake_case ( snake_case__ :List[Any] , snake_case__ :Dict) -> List[Any]:
_A = ViTConfig(image_size=384 , qkv_bias=snake_case__)
_A = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_A = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_A = 1_024
_A = 4_096
_A = 24
_A = 16
_A = 1_024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""")
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = False
_A = """relu"""
_A = 1_024
_A = True
_A = False
_A = False
# load HuggingFace model
_A = ViTModel(snake_case__ , add_pooling_layer=snake_case__)
_A = TrOCRForCausalLM(snake_case__)
_A = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__)
model.eval()
# load state_dict of original model, rename some keys
_A = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" , check_hash=snake_case__)["""model"""]
_A = create_rename_keys(snake_case__ , snake_case__)
for src, dest in rename_keys:
rename_key(state_dict , src , dest)
read_in_q_k_v(snake_case__ , snake_case__)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_A = state_dict.pop(key)
if key.startswith("""decoder""") and "output_projection" not in key:
_A = val
else:
_A = val
# load state dict
model.load_state_dict(snake_case__)
# Check outputs on an image
_A = ViTImageProcessor(size=encoder_config.image_size)
_A = RobertaTokenizer.from_pretrained("""roberta-large""")
_A = TrOCRProcessor(snake_case__ , snake_case__)
_A = processor(images=prepare_img(snake_case__) , return_tensors="""pt""").pixel_values
# verify logits
_A = torch.tensor([[model.config.decoder.decoder_start_token_id]])
_A = model(pixel_values=snake_case__ , decoder_input_ids=snake_case__)
_A = outputs.logits
_A = torch.Size([1, 1, 50_265])
if "trocr-base-handwritten" in checkpoint_url:
_A = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
elif "trocr-large-handwritten" in checkpoint_url:
_A = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
elif "trocr-base-printed" in checkpoint_url:
_A = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
elif "trocr-large-printed" in checkpoint_url:
_A = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case__ , atol=1E-3), "First elements of logits not as expected"
Path(snake_case__).mkdir(exist_ok=snake_case__)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(snake_case__)
print(F'''Saving processor to {pytorch_dump_folder_path}''')
processor.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
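# --- Editor's addition: a hedged toy run of the pop-and-reinsert mechanics
# that rename_key and the decoder-prefixing loop above rely on. The keys and
# values are made up; only the dict surgery mirrors the script.
from collections import OrderedDict

toy_state = OrderedDict({"encoder.deit.norm.weight": [1.0], "decoder.version": [2]})
toy_renames = [("encoder.deit.norm.weight", "encoder.layernorm.weight")]
for src, dest in toy_renames:
    toy_state[dest] = toy_state.pop(src)  # move the tensor under its new name
del toy_state["decoder.version"]          # drop parameters the HF model lacks
print(list(toy_state.keys()))             # ['encoder.layernorm.weight']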
| 83 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
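# Hedged usage sketch (added, not part of the metric itself): how this metric is
# typically driven once registered with `datasets`; the file names are invented
# placeholders and the inputs are lists of CoNLL-2012 lines.
#
#     import datasets
#     coval = datasets.load_metric("coval")
#     key_lines = open("key.conll").read().splitlines()   # gold annotation
#     sys_lines = open("sys.conll").read().splitlines()   # system output
#     print(coval.compute(predictions=sys_lines, references=key_lines))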
| 83 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = DebertaTokenizer
lowerCamelCase :List[str] = True
lowerCamelCase :int = DebertaTokenizerFast
def UpperCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A = {"""unk_token""": """[UNK]"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_A = """lower newer"""
_A = """lower newer"""
return input_text, output_text
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_tokenizer()
_A = """lower newer"""
_A = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_A = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokens + [tokenizer.unk_token]
_A = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.get_tokenizer()
_A = tokenizer("""Hello""" , """World""" )
_A = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
_A = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_A = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_A = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_A = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase ( self ) -> List[str]:
_A = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_A = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
_A = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_A = [tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) for seq in encoding["""input_ids"""]]
# fmt: off
_A = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_A = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowerCAmelCase_ )
for expected, decoded in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
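# Added note (hedged): "\u0120" is the byte-level BPE marker for a leading space
# ("Ġ"), so with the toy vocab above " lower" tokenizes to ["\u0120low", "er"],
# i.e. ids [14, 15] -- the same convention the expected ids in these tests rely on.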
| 83 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
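# Hedged worked example (added): one round of the greedy merge loop above, with an
# invented rank table. For token "low" the word becomes ("l", "o", "w</w>") and
# get_pairs yields {("l", "o"), ("o", "w</w>")}; if ("l", "o") has the lowest rank,
# the scan rebuilds the word as ("lo", "w</w>"). When no ranked pair remains, the
# pieces are joined with "@@ " ("lo@@ w</w>") and the trailing "</w>" is dropped,
# giving "lo@@ w".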
| 83 | 1 |
def snake_case ( ) -> Dict:
_A = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_A = 6
_A = 1
_A = 1_901
_A = 0
while year < 2_001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_A = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_A = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_A = day - days_per_month[month - 2]
if month > 12:
year += 1
_A = 1
if year < 2_001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
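if __name__ == "__main__":
    # Hedged cross-check (added): the standard library agrees with the counting
    # above; Project Euler 19's published answer for 1901-2000 is 171.
    import datetime
    _alt = sum(
        datetime.date(y, m, 1).weekday() == 6  # Sunday
        for y in range(1_901, 2_001)
        for m in range(1, 13)
    )
    assert _alt == 171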
| 83 | _SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
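if __name__ == "__main__":
    # Hedged round-trip example (added): a short invented message.
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"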
| 83 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
pass
@is_pipeline_test
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
_A = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(lowerCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
# The floating-point scores are so close that rounding error makes the ranking
# unstable, so the order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase_ ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
] , )
@require_tf
def UpperCAmelCase ( self ) -> Optional[int]:
_A = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(lowerCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
_A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
] , )
@slow
@require_torch
def UpperCAmelCase ( self ) -> Any:
_A = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(lowerCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase ( self ) -> Optional[int]:
_A = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(lowerCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
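# Hedged usage sketch (added): the minimal inference path these tests exercise;
# the checkpoint and image path mirror the fixtures used above.
#
#     from transformers import pipeline
#     from PIL import Image
#     classifier = pipeline(
#         task="zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#     )
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     print(classifier(image, candidate_labels=["cat", "plane", "remote"]))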
| 83 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
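# Added note (hedged): upstream this final assignment targets sys.modules[__name__],
# swapping the package for a _LazyModule so that, e.g.,
#
#     from transformers.models.jukebox import JukeboxConfig
#
# defers the heavy torch import until the attribute is first touched, while the
# TYPE_CHECKING branch above keeps static analyzers aware of the real symbols.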
| 83 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_SCREAMING_SNAKE_CASE = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_SCREAMING_SNAKE_CASE = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_SCREAMING_SNAKE_CASE = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_SCREAMING_SNAKE_CASE = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
_SCREAMING_SNAKE_CASE = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
_SCREAMING_SNAKE_CASE = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def snake_case ( snake_case__ :Tuple) -> Optional[int]:
if isinstance(snake_case__ , bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""")
def snake_case ( snake_case__ :Any , snake_case__ :Dict , snake_case__ :List[Any] , snake_case__ :Optional[Any] , snake_case__ :Union[str, Any]=False) -> Any:
_A = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_A = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_A = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_A = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_A = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_A = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_A = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_A = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_A = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_A = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_A = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_A = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def snake_case ( snake_case__ :List[str] , snake_case__ :Tuple , snake_case__ :List[Any] , snake_case__ :List[Any] , snake_case__ :Optional[Any]=None) -> List[Any]:
_A , _A , _A = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0)
_A , _A , _A = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0)
_A = checkpoint[F'''{old_prefix}.norm.weight''']
_A = checkpoint[F'''{old_prefix}.norm.bias''']
_A = weight_q.squeeze(-1).squeeze(-1)
_A = bias_q.squeeze(-1).squeeze(-1)
_A = weight_k.squeeze(-1).squeeze(-1)
_A = bias_k.squeeze(-1).squeeze(-1)
_A = weight_v.squeeze(-1).squeeze(-1)
_A = bias_v.squeeze(-1).squeeze(-1)
_A = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1).squeeze(-1)
)
_A = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1).squeeze(-1)
return new_checkpoint
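# Added note (hedged): the source checkpoint stores attention as fused 1x1 convs,
# so `qkv` is chunked into thirds along dim 0 and the trailing conv dimensions are
# squeezed away to recover the plain linear q/k/v weights diffusers expects.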
def snake_case ( snake_case__ :str , snake_case__ :Optional[int]) -> Union[str, Any]:
_A = torch.load(snake_case__ , map_location="""cpu""")
_A = {}
_A = checkpoint["""time_embed.0.weight"""]
_A = checkpoint["""time_embed.0.bias"""]
_A = checkpoint["""time_embed.2.weight"""]
_A = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
_A = checkpoint["""label_emb.weight"""]
_A = checkpoint["""input_blocks.0.0.weight"""]
_A = checkpoint["""input_blocks.0.0.bias"""]
_A = unet_config["""down_block_types"""]
_A = unet_config["""layers_per_block"""]
_A = unet_config["""attention_head_dim"""]
_A = unet_config["""block_out_channels"""]
_A = 1
_A = channels_list[0]
for i, layer_type in enumerate(snake_case__):
_A = channels_list[i]
_A = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case__):
_A = F'''down_blocks.{i}.resnets.{j}'''
_A = F'''input_blocks.{current_layer}.0'''
_A = True if j == 0 and downsample_block_has_skip else False
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__)
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case__):
_A = F'''down_blocks.{i}.resnets.{j}'''
_A = F'''input_blocks.{current_layer}.0'''
_A = True if j == 0 and downsample_block_has_skip else False
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__)
_A = F'''down_blocks.{i}.attentions.{j}'''
_A = F'''input_blocks.{current_layer}.1'''
_A = convert_attention(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
current_layer += 1
if i != len(snake_case__) - 1:
_A = F'''down_blocks.{i}.downsamplers.0'''
_A = F'''input_blocks.{current_layer}.0'''
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__)
current_layer += 1
_A = current_channels
# hardcoded the mid-block for now
_A = """mid_block.resnets.0"""
_A = """middle_block.0"""
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = """mid_block.attentions.0"""
_A = """middle_block.1"""
_A = convert_attention(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = """mid_block.resnets.1"""
_A = """middle_block.2"""
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = 0
_A = unet_config["""up_block_types"""]
for i, layer_type in enumerate(snake_case__):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1):
_A = F'''up_blocks.{i}.resnets.{j}'''
_A = F'''output_blocks.{current_layer}.0'''
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__)
current_layer += 1
if i != len(snake_case__) - 1:
_A = F'''up_blocks.{i}.upsamplers.0'''
_A = F'''output_blocks.{current_layer-1}.1'''
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__)
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1):
_A = F'''up_blocks.{i}.resnets.{j}'''
_A = F'''output_blocks.{current_layer}.0'''
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__)
_A = F'''up_blocks.{i}.attentions.{j}'''
_A = F'''output_blocks.{current_layer}.1'''
_A = convert_attention(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
current_layer += 1
if i != len(snake_case__) - 1:
_A = F'''up_blocks.{i}.upsamplers.0'''
_A = F'''output_blocks.{current_layer-1}.2'''
_A = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = checkpoint["""out.0.weight"""]
_A = checkpoint["""out.0.bias"""]
_A = checkpoint["""out.2.weight"""]
_A = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = strabool(args.class_cond)
_SCREAMING_SNAKE_CASE = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_SCREAMING_SNAKE_CASE = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = con_pt_to_diffuser(args.unet_path, unet_config)
_SCREAMING_SNAKE_CASE = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_SCREAMING_SNAKE_CASE = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
_SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(**scheduler_config)
_SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
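# Hedged example invocation (added): the script name and checkpoint file are
# assumptions following the diffusers repository and the OpenAI consistency-models
# release, not guaranteed by this file.
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path cd_imagenet64_l2.pt \
#         --dump_path ./cd_imagenet64_l2 \
#         --class_cond True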
| 83 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
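# Hedged usage sketch (added): agents resolve this tool by its `summarizer` name;
# called directly, the class above (obfuscated here to `a`) would be used roughly
# as follows -- the sample text is a placeholder and the first call downloads the
# model.
#
#     tool = a()
#     summary = tool("Long meeting transcript goes here ...")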
| 83 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_SCREAMING_SNAKE_CASE = 'pytorch_model.bin'
_SCREAMING_SNAKE_CASE = 'pytorch_model.bin.index.json'
_SCREAMING_SNAKE_CASE = 'adapter_config.json'
_SCREAMING_SNAKE_CASE = 'adapter_model.bin'
_SCREAMING_SNAKE_CASE = 'adapter_model.safetensors'
_SCREAMING_SNAKE_CASE = 'tf_model.h5'
_SCREAMING_SNAKE_CASE = 'tf_model.h5.index.json'
_SCREAMING_SNAKE_CASE = 'model.ckpt'
_SCREAMING_SNAKE_CASE = 'flax_model.msgpack'
_SCREAMING_SNAKE_CASE = 'flax_model.msgpack.index.json'
_SCREAMING_SNAKE_CASE = 'model.safetensors'
_SCREAMING_SNAKE_CASE = 'model.safetensors.index.json'
_SCREAMING_SNAKE_CASE = 'config.json'
_SCREAMING_SNAKE_CASE = 'preprocessor_config.json'
_SCREAMING_SNAKE_CASE = FEATURE_EXTRACTOR_NAME
_SCREAMING_SNAKE_CASE = 'generation_config.json'
_SCREAMING_SNAKE_CASE = 'modelcard.json'
_SCREAMING_SNAKE_CASE = '▁'
_SCREAMING_SNAKE_CASE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_SCREAMING_SNAKE_CASE = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_SCREAMING_SNAKE_CASE = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_SCREAMING_SNAKE_CASE = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def snake_case ( snake_case__ :int) -> Any:
if version.parse(__version__) < version.parse(snake_case__):
if "dev" in min_version:
_A = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
_A = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""")
| 83 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`, so it is filled in from `cls.predictions.bias` here
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
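# Hedged example invocation (added): the script name is an assumption based on the
# transformers repository layout; the checkpoint is one of the
# ACCEPTABLE_CHECKPOINTS listed above.
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_pre_trained.th ./visualbert-vqa-pretrained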
| 83 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[str] = CTRLTokenizer
lowerCamelCase :Optional[int] = False
lowerCamelCase :int = False
def UpperCAmelCase ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
_A = {"""unk_token""": """<unk>"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = """adapt react readapt apt"""
_A = """adapt react readapt apt"""
return input_text, output_text
def UpperCAmelCase ( self ) -> Optional[int]:
_A = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = """adapt react readapt apt"""
_A = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
_A = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokens + [tokenizer.unk_token]
_A = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
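# Added note (hedged): CTRL's BPE marks every non-final subword with a trailing
# "@@", so detokenization is just deleting "@@ " -- e.g. "re@@ a@@ c@@ t" -> "react".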
| 83 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
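if __name__ == "__main__":
    # Hedged illustration (added): the behavior the missing-columns test pins down --
    # the first record fixes the schema and later records fill absent keys with None.
    ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(ds.column_names)  # ['col_1']
    print(ds[1])            # {'col_1': None}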
| 83 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_A = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowerCAmelCase_ ):
_A = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_A = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_A = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_A = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_A = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , eta=lowerCAmelCase_ , use_clipped_model_output=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
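# Hedged usage sketch (added): the usual entry point for this pipeline; the
# checkpoint id is a common diffusers example, not something this file fixes.
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50).images[0]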
| 83 | def snake_case ( snake_case__ :int = 1_000_000) -> int:
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , p)))
_A = [float(n) for n in range(limit + 1)]
for p in primes:
for n in range(p , limit + 1 , p):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
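if __name__ == "__main__":
    # Hedged sanity check (added): Euler's product gives phi(n) = n * prod(1 - 1/p)
    # over primes p dividing n, so sum_{n=2..8} phi(n) = 1+2+2+4+2+6+4 = 21.
    assert solution(8) == 21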
| 83 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_SCREAMING_SNAKE_CASE = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def snake_case ( snake_case__ :Any , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Optional[Any] , snake_case__ :List[Any]) -> Optional[Any]:
for attribute in key.split("""."""):
_A = getattr(snake_case__ , snake_case__)
if weight_type is not None:
_A = getattr(snake_case__ , snake_case__).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def snake_case ( snake_case__ :List[Any] , snake_case__ :Optional[Any]) -> Optional[int]:
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_A = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
_A = True
if "*" in mapped_key:
_A = name.split(snake_case__)[0].split(""".""")[-2]
_A = mapped_key.replace("""*""" , snake_case__)
if "weight_g" in name:
_A = """weight_g"""
elif "weight_v" in name:
_A = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
_A = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_A = """weight"""
else:
_A = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
continue
if not is_used:
unused_weights.append(snake_case__)
logger.warning(F'''Unused weights: {unused_weights}''')
def snake_case ( snake_case__ :Tuple , snake_case__ :List[str] , snake_case__ :List[str] , snake_case__ :Dict , snake_case__ :Union[str, Any]) -> List[Any]:
_A = full_name.split("""conv_layers.""")[-1]
_A = name.split(""".""")
_A = int(items[0])
_A = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(snake_case__)
@torch.no_grad()
def snake_case ( snake_case__ :int , snake_case__ :Dict , snake_case__ :Any=None) -> Any:
# load the pre-trained checkpoints
_A = torch.load(snake_case__)
_A = WavLMConfigOrig(checkpoint["""cfg"""])
_A = WavLMOrig(snake_case__)
model.load_state_dict(checkpoint["""model"""])
model.eval()
if config_path is not None:
_A = WavLMConfig.from_pretrained(snake_case__)
else:
_A = WavLMConfig()
_A = WavLMModel(snake_case__)
recursively_load_weights(snake_case__ , snake_case__)
hf_wavlm.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
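# Hedged example invocation (added): the script name is an assumption based on the
# transformers repository layout, and the checkpoint file name is a placeholder
# for a WavLM release checkpoint.
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./WavLM-Base-plus.pt \
#         --pytorch_dump_folder_path ./wavlm-base-plus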
| 83 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
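# The tests above lean on the ids_tensor helper imported from
# test_modeling_common; a minimal stand-in sketch of its behaviour
# (the real helper also accepts extra arguments that are omitted here):
import torch

def ids_tensor_sketch(shape, vocab_size):
    # uniform random token ids in [0, vocab_size)
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)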
| 83 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_SCREAMING_SNAKE_CASE = random.Random()
if is_torch_available():
import torch
def snake_case ( snake_case__ :List[Any] , snake_case__ :Optional[int]=1.0 , snake_case__ :Optional[int]=None , snake_case__ :Dict=None) -> str:
if rng is None:
_A = global_rng
_A = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=4_00 , lowerCAmelCase_=20_00 , lowerCAmelCase_=1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1_60_00 , lowerCAmelCase_=True , lowerCAmelCase_=True , ) -> List[Any]:
_A = parent
_A = batch_size
_A = min_seq_length
_A = max_seq_length
_A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A = feature_size
_A = padding_value
_A = sampling_rate
_A = return_attention_mask
_A = do_normalize
def UpperCAmelCase ( self ) -> str:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Dict:
def _flatten(lowerCAmelCase_ ):
return list(itertools.chain(*lowerCAmelCase_ ) )
if equal_length:
_A = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_A = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[str] = ASTFeatureExtractor
def UpperCAmelCase ( self ) -> Tuple:
_A = ASTFeatureExtractionTester(self )
def UpperCAmelCase ( self ) -> Optional[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_A = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_A = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test batched
_A = feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""np""" ).input_values
_A = feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_A = np.asarray(lowerCAmelCase_ )
_A = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values
_A = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
import torch
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = np.random.rand(1_00 ).astype(np.floataa )
_A = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_A = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
from datasets import load_dataset
_A = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A = ds.sort("""id""" ).select(range(lowerCAmelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
# fmt: off
_A = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
_A = self._load_datasamples(1 )
_A = ASTFeatureExtractor()
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase_ , atol=1E-4 ) )
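# Hedged usage sketch of the feature extractor under test: one second of
# synthetic 16 kHz audio in, a fixed-size (1, 1024, 128) spectrogram out
# (the shape matches the integration test above; the waveform is random noise):
import numpy as np
from transformers import ASTFeatureExtractor

waveform = np.random.randn(16_000).astype(np.float32)
features = ASTFeatureExtractor()(waveform, sampling_rate=16_000, return_tensors="np")
print(features.input_values.shape)  # (1, 1024, 128)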
| 83 | def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
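# NOR is functionally complete; a small illustrative sketch (helper names
# not_, or_ and and_ are ours) building the other basic gates from nor_gate:
def not_(a: int) -> int:
    return nor_gate(a, a)

def or_(a: int, b: int) -> int:
    return not_(nor_gate(a, b))

def and_(a: int, b: int) -> int:
    return nor_gate(not_(a), not_(b))

# truth-table checks: OR is 0 only for (0, 0); AND is 1 only for (1, 1)
assert [or_(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]
assert [and_(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]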
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
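# _LazyModule defers the heavy torch-backed imports above until an attribute
# is first accessed; a minimal stand-alone sketch of the idea (this is an
# illustration, not the actual transformers implementation):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the module that really defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once
        return value

lazy = LazyModuleSketch("lazy_std", {"json": ["dumps", "loads"]})
print(lazy.dumps({"lazy": True}))  # json is only looked up on this first access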
| 83 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) ratios between the original and padded sizes
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
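# Worked sketch of the shortest-edge resize arithmetic used above: scale the
# image so its short side equals `size`, then shrink again if the long side
# would exceed `max_size` (pure arithmetic, no image I/O; names are ours):
def shortest_edge_hw(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_hw(480, 640, 800, 1333) == (800, 1067)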
| 83 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 32
def snake_case ( snake_case__ :Accelerator , snake_case__ :int = 16 , snake_case__ :str = "bert-base-cased") -> List[Any]:
_A = AutoTokenizer.from_pretrained(snake_case__)
_A = load_dataset("""glue""" , """mrpc""")
def tokenize_function(snake_case__ :List[str]):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(snake_case__ :int):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__)
_A = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__)
return train_dataloader, eval_dataloader
def snake_case ( snake_case__ :Dict , snake_case__ :List[Any]) -> List[Any]:
# Initialize accelerator
_A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config["""lr"""]
_A = int(config["""num_epochs"""])
_A = int(config["""seed"""])
_A = int(config["""batch_size"""])
_A = args.model_name_or_path
set_seed(snake_case__)
_A , _A = get_dataloaders(snake_case__ , snake_case__ , snake_case__)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__)
# Instantiate optimizer
_A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_A = optimizer_cls(params=model.parameters() , lr=snake_case__)
if accelerator.state.deepspeed_plugin is not None:
_A = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_A = 1
_A = (len(snake_case__) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
_A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A , _A , _A , _A , _A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
# We need to keep track of how many total steps we have iterated over
_A = 0
# We also need to keep track of the stating epoch so files are named properly
_A = 0
# Now we train the model
_A = evaluate.load("""glue""" , """mrpc""")
_A = 0
_A = {}
for epoch in range(snake_case__ , snake_case__):
model.train()
for step, batch in enumerate(snake_case__):
_A = model(**snake_case__)
_A = outputs.loss
_A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_A = 0
for step, batch in enumerate(snake_case__):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
_A = model(**snake_case__)
_A = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
_A , _A = accelerator.gather(
(predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__) - 1:
_A = predictions[: len(eval_dataloader.dataset) - samples_seen]
_A = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , snake_case__)
_A = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
_A = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""") , """w""") as f:
json.dump(snake_case__ , snake_case__)
def snake_case ( ) -> List[Any]:
_A = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=snake_case__ , default=snake_case__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=3 , help="""Number of train epochs.""" , )
_A = parser.parse_args()
_A = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__)
if __name__ == "__main__":
main()
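# Hedged launch sketch -- the config file and script names are placeholders;
# the DummyOptim/DummyScheduler branches above only trigger when the DeepSpeed
# config itself supplies an optimizer/scheduler:
#   accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 \
#       --output_dir ./results --performance_lower_bound 0.8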
| 83 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; every node whose
    subtree has an even number of vertices is recorded in ``cuts``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
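# Why this works: a subtree with an even number of vertices can be detached by
# cutting the edge to its parent, so every even-sized subtree found by dfs()
# yields one removable edge. The root's own component is always counted as
# well, hence the final len(cuts) - 1. For the tree above, the subtrees rooted
# at 3 and 6 have sizes 2 and 4, so the program prints 2.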
| 83 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = '''cvt'''
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=[7, 3, 3] , lowerCAmelCase_=[4, 2, 2] , lowerCAmelCase_=[2, 1, 1] , lowerCAmelCase_=[64, 1_92, 3_84] , lowerCAmelCase_=[1, 3, 6] , lowerCAmelCase_=[1, 2, 10] , lowerCAmelCase_=[4.0, 4.0, 4.0] , lowerCAmelCase_=[0.0, 0.0, 0.0] , lowerCAmelCase_=[0.0, 0.0, 0.0] , lowerCAmelCase_=[0.0, 0.0, 0.1] , lowerCAmelCase_=[True, True, True] , lowerCAmelCase_=[False, False, True] , lowerCAmelCase_=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase_=[3, 3, 3] , lowerCAmelCase_=[1, 1, 1] , lowerCAmelCase_=[2, 2, 2] , lowerCAmelCase_=[1, 1, 1] , lowerCAmelCase_=[1, 1, 1] , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , **lowerCAmelCase_ , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase_ )
_A = num_channels
_A = patch_sizes
_A = patch_stride
_A = patch_padding
_A = embed_dim
_A = num_heads
_A = depth
_A = mlp_ratio
_A = attention_drop_rate
_A = drop_rate
_A = drop_path_rate
_A = qkv_bias
_A = cls_token
_A = qkv_projection_method
_A = kernel_qkv
_A = padding_kv
_A = stride_kv
_A = padding_q
_A = stride_q
_A = initializer_range
_A = layer_norm_eps
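# Minimal usage sketch, assuming the class above is exported as CvtConfig
# (as in the transformers library):
from transformers import CvtConfig

config = CvtConfig()
print(config.embed_dim)  # [64, 192, 384] -- one width per stage
print(config.depth)      # [1, 2, 10]     -- transformer blocks per stage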
| 83 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of a minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node, add its rank and adjacency list to the queue;
    # heapq implements a min priority queue, so -1 * len(v) makes it
    # behave as a max priority queue on the rank
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all edges adjacent to argmax
        for elem in queue:
            # if this vertex has no adjacent node left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
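# Small verification sketch: every listed edge should have at least one
# endpoint inside the returned cover (helper name is ours):
def covers_all_edges(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u, neighbors in graph.items() for v in neighbors)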
| 83 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
"""simple docstring"""
def __init__( self ) -> int:
_A = """"""
_A = """"""
_A = []
_A = 0
_A = 2_56
_A = 0
_A = 0
_A = 0
_A = 0
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = cva.imread(lowerCAmelCase_ , 0 )
_A = copy.deepcopy(self.img )
_A , _A , _A = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="""x""" )
_A = np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
_A = x[i] / self.k
self.sk += prk
_A = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = last % 1 # keep only the fractional part; it drives the round-half-up below
_A = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
_A = int(np.ma.count(self.img ) / self.img[1].size )
_A = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_A = self.img[j][i]
if num != self.last_list[num]:
_A = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def UpperCAmelCase ( self ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCAmelCase ( self ) -> Tuple:
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
_SCREAMING_SNAKE_CASE = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
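# The per-pixel remapping above is classic histogram equalization: grey level
# r maps to round((L - 1) * CDF(r)). A compact numpy sketch of the same idea
# (the rounding mode differs slightly from the manual >= 0.5 rule above):
import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size
    lut = np.round((levels - 1) * cdf).astype(img.dtype)
    return lut[img]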
| 83 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # all remaining prime candidates are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
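# Why stepping by 6 suffices: every integer is congruent to 0..5 mod 6, and
# the residues 0, 2, 3, 4 are divisible by 2 or 3, which the early checks
# already reject -- only 6k - 1 and 6k + 1 can still be prime. Cross-check
# sketch against a naive definition:
def naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == naive_is_prime(n) for n in range(200))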
| 83 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass(frozen=__lowerCAmelCase )
class a :
"""simple docstring"""
lowerCamelCase :str
lowerCamelCase :str
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[str] = None
@dataclass(frozen=__lowerCAmelCase )
class a :
"""simple docstring"""
lowerCamelCase :List[int]
lowerCamelCase :Optional[List[int]] = None
lowerCamelCase :Optional[List[int]] = None
lowerCamelCase :Optional[Union[int, float]] = None
lowerCamelCase :Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[InputFeatures]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_ = False , ) -> Dict:
_A = hans_processors[task]()
_A = os.path.join(
lowerCAmelCase_ , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(lowerCAmelCase_ ) , lowerCAmelCase_ , ) , )
_A = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + """.lock"""
with FileLock(lowerCAmelCase_ ):
if os.path.exists(lowerCAmelCase_ ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
_A = torch.load(lowerCAmelCase_ )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
_A = (
processor.get_dev_examples(lowerCAmelCase_ ) if evaluate else processor.get_train_examples(lowerCAmelCase_ )
)
logger.info("""Training examples: %s""" , len(lowerCAmelCase_ ) )
_A = hans_convert_examples_to_features(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_ )
torch.save(self.features , lowerCAmelCase_ )
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , lowerCAmelCase_ ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase ( self ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class a :
"""simple docstring"""
lowerCamelCase :List[InputFeatures]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1_28 , lowerCAmelCase_=False , lowerCAmelCase_ = False , ) -> Optional[Any]:
_A = hans_processors[task]()
_A = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
_A = processor.get_dev_examples(lowerCAmelCase_ ) if evaluate else processor.get_train_examples(lowerCAmelCase_ )
_A = hans_convert_examples_to_features(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(lowerCAmelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_A = tf.data.Dataset.from_generator(
lowerCAmelCase_ , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase ( self ) -> Dict:
return self.dataset
def __len__( self ) -> Dict:
return len(self.features )
def __getitem__( self , lowerCAmelCase_ ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase ( self ) -> List[Any]:
return self.label_list
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase_ , """heuristics_train_set.txt""" ) ) , """train""" )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase_ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = []
for i, line in enumerate(lowerCAmelCase_ ):
if i == 0:
continue
_A = """%s-%s""" % (set_type, line[0])
_A = line[5]
_A = line[6]
_A = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
_A = line[0]
examples.append(InputExample(guid=lowerCAmelCase_ , text_a=lowerCAmelCase_ , text_b=lowerCAmelCase_ , label=lowerCAmelCase_ , pairID=lowerCAmelCase_ ) )
return examples
def snake_case ( snake_case__ :List[InputExample] , snake_case__ :List[str] , snake_case__ :int , snake_case__ :PreTrainedTokenizer , ) -> Optional[int]:
_A = {label: i for i, label in enumerate(snake_case__)}
_A = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case__) , desc="""convert examples to features"""):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d""" % (ex_index))
_A = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case__ , max_length=snake_case__ , padding="""max_length""" , truncation=snake_case__ , return_overflowing_tokens=snake_case__ , )
_A = label_map[example.label] if example.label in label_map else 0
_A = int(example.pairID)
features.append(InputFeatures(**snake_case__ , label=snake_case__ , pairID=snake_case__))
for i, example in enumerate(examples[:5]):
logger.info("""*** Example ***""")
logger.info(F'''guid: {example}''')
logger.info(F'''features: {features[i]}''')
return features
_SCREAMING_SNAKE_CASE = {
'hans': 3,
}
_SCREAMING_SNAKE_CASE = {
'hans': HansProcessor,
}
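# Column layout assumed by _create_examples above (tab-separated HANS rows):
#   line[0] -> pairID (also used in the guid), line[5] -> premise (text_a),
#   line[6] -> hypothesis (text_b), line[7] -> gold label (a leading "ex"
#   prefix is stripped before use).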
| 83 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
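# Hedged usage sketch of the torch export above: gluing two pretrained BERT
# checkpoints (ids are illustrative) into a single seq2seq model:
from transformers import EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"
)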
| 83 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_SCREAMING_SNAKE_CASE = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_SCREAMING_SNAKE_CASE = spec.loader.load_module()
_SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_SCREAMING_SNAKE_CASE = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
_SCREAMING_SNAKE_CASE = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def snake_case ( ) -> Optional[Any]:
_A = []
for config_class in list(CONFIG_MAPPING.values()):
_A = False
# source code of `config_class`
_A = inspect.getsource(snake_case__)
_A = _re_checkpoint.findall(snake_case__)
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_A , _A = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_A = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
_A = True
break
_A = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(snake_case__)
if len(snake_case__) > 0:
_A = """\n""".join(sorted(snake_case__))
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
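# The checkpoint regex in action on a representative docstring fragment:
import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
line = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert pattern.findall(line) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]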
| 83 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = {"""image""": image, """question""": question}
else:
_A = image
_A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any:
_A = load_image(inputs["""image"""] )
_A = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase_ )
return model_inputs
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = self.model(**lowerCAmelCase_ )
return model_outputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
_A = self.model.config.num_labels
if self.framework == "pt":
_A = model_outputs.logits.sigmoid()[0]
_A , _A = probs.topk(lowerCAmelCase_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_A = scores.tolist()
_A = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
| 83 | 1 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of every file in ``src_dir`` to ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
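# fire exposes minify's signature as a CLI; a hedged invocation sketch
# (script name and paths are placeholders):
#   python minify_dataset.py ./data/full ./data/tiny 100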
| 83 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
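# Hedged invocation sketch (script name, CSV paths and column id are
# placeholders; the CSVs must share one label column plus 1-2 text columns):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased --label_column_id 0 \
#       --train_file ./train.csv --dev_file ./dev.csv --test_file ./test.csv \
#       --output_dir ./model --do_train --do_eval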
| 83 | 1 |
_SCREAMING_SNAKE_CASE = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # subtractive notation: a smaller value before a larger one
            # contributes larger - smaller
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
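# Round-trip sanity sketch for the two converters above (the names
# roman_to_int / int_to_roman are the restored conventional names):
# MMMDXLIX = 1000 + 1000 + 1000 + 500 + (50 - 10) + (10 - 1) = 3_549
assert int_to_roman(3_549) == "MMMDXLIX"
assert roman_to_int("MMMDXLIX") == 3_549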
| 83 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''speech_to_text'''
lowerCamelCase :List[str] = ['''past_key_values''']
lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 83 | 1 |
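The Roman-numeral row above is easier to follow with the names restored. A self-contained sketch of both directions, using the same greedy value table and the same subtractive-pair rule; only the identifiers differ from the row:

ROMAN = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
         (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
         (5, "V"), (4, "IV"), (1, "I")]

def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)  # how many copies of this symbol fit
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)

def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total, place = 0, 0
    while place < len(roman):
        # subtractive pair such as "IV" or "CM": smaller value precedes larger
        if place + 1 < len(roman) and vals[roman[place]] < vals[roman[place + 1]]:
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total

assert int_to_roman(1994) == "MCMXCIV"
assert roman_to_int("MCMXCIV") == 1994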
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 83 | from __future__ import annotations
from collections.abc import Callable
def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
_A = x_start
_A = fnc(snake_case__)
_A = 0.0
for _ in range(snake_case__):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_A = (x_end - x_start) / steps + xa
_A = fnc(snake_case__)
area += abs(fxa + fxa) * (xa - xa) / 2
# Increment step
_A = xa
_A = fxa
return area
if __name__ == "__main__":
def snake_case ( snake_case__ :Tuple) -> List[str]:
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_SCREAMING_SNAKE_CASE = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 83 | 1 |
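The integration row above is the composite trapezoidal rule. A cleaned-up sketch with descriptive names; note that the `abs` call, kept from the row, makes this an unsigned area rather than a signed integral:

from typing import Callable

def trapezoidal_area(fnc: Callable[[float], float], x_start: float,
                     x_end: float, steps: int = 100) -> float:
    # Sum the areas of `steps` trapezoids under fnc between x_start and x_end.
    x1, fx1 = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        x1, fx1 = x2, fx2
    return area

print(trapezoidal_area(lambda x: x * x, 0, 3, 10_000))  # ~ 9.0 (exact integral is 9)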
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_SCREAMING_SNAKE_CASE = 'scheduler_config.json'
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = 1
lowerCamelCase :Any = 2
lowerCamelCase :int = 3
lowerCamelCase :Optional[int] = 4
lowerCamelCase :List[str] = 5
lowerCamelCase :Dict = 6
lowerCamelCase :Optional[Any] = 7
lowerCamelCase :int = 8
lowerCamelCase :List[Any] = 9
lowerCamelCase :str = 10
lowerCamelCase :str = 11
lowerCamelCase :Optional[Any] = 12
lowerCamelCase :List[Any] = 13
lowerCamelCase :List[str] = 14
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :torch.FloatTensor
class a :
"""simple docstring"""
lowerCamelCase :Optional[int] = SCHEDULER_CONFIG_NAME
lowerCamelCase :List[str] = []
lowerCamelCase :Optional[int] = True
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[str]:
_A , _A , _A = cls.load_config(
pretrained_model_name_or_path=lowerCAmelCase_ , subfolder=lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , return_commit_hash=lowerCAmelCase_ , **lowerCAmelCase_ , )
return cls.from_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , **lowerCAmelCase_ ) -> List[Any]:
self.save_config(save_directory=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def UpperCAmelCase ( self ) -> List[str]:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
_A = list(set([cls.__name__] + cls._compatibles ) )
_A = importlib.import_module(__name__.split(""".""" )[0] )
_A = [
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) for c in compatible_classes_str if hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
]
return compatible_classes
| 83 | import numpy as np
import qiskit
def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str:
_A = np.random.default_rng(seed=snake_case__)
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_A = 6 * key_len
# Measurement basis for Alice's qubits.
_A = rng.integers(2 , size=snake_case__)
# The set of states Alice will prepare.
_A = rng.integers(2 , size=snake_case__)
# Measurement basis for Bob's qubits.
_A = rng.integers(2 , size=snake_case__)
# Quantum Circuit to simulate BB84
_A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""")
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(snake_case__):
if alice_state[index] == 1:
bbaa_circ.x(snake_case__)
if alice_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(snake_case__):
if bob_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_A = qiskit.Aer.get_backend("""aer_simulator""")
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__)
# Returns the result of measurement.
_A = job.result().get_counts(snake_case__).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_A = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case__ , snake_case__ , snake_case__)
if alice_basis_bit == bob_basis_bit
])
# Get final key. Pad with 0 if too short, otherwise truncate.
_A = gen_key[:key_len] if len(snake_case__) >= key_len else gen_key.ljust(snake_case__ , """0""")
return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 83 | 1 |
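The BB84 row mixes a qiskit simulation with classical post-processing. The sifting step alone needs no quantum backend; a sketch of just that step, with the oversampling factor and seed chosen arbitrarily for illustration:

import numpy as np

def sift_key(alice_basis, bob_basis, measured_bits: str) -> str:
    # Keep only positions where both parties chose the same measurement basis;
    # on mismatched bases Bob's outcome is uniformly random and must be discarded.
    return "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
                   if a == b)

rng = np.random.default_rng(seed=0)
n = 48  # oversample: only about half of the positions survive sifting
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
outcomes = "".join(map(str, rng.integers(2, size=n)))
print(sift_key(alice_basis, bob_basis, outcomes))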
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_SCREAMING_SNAKE_CASE = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def snake_case ( snake_case__ :Tuple=None) -> int:
if subparsers is not None:
_A = subparsers.add_parser("""tpu-config""" , description=_description)
else:
_A = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description)
# Core arguments
_A = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""")
config_args.add_argument(
"""--config_file""" , type=snake_case__ , default=snake_case__ , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=snake_case__ , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=snake_case__ , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
_A = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""")
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=snake_case__ , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""")
if subparsers is not None:
parser.set_defaults(func=snake_case__)
return parser
def snake_case ( snake_case__ :Union[str, Any]) -> Any:
_A = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(snake_case__):
_A = load_config_from_file(args.config_file)
if not args.command_file and defaults.command_file is not None and not args.command:
_A = defaults.command_file
if not args.command and defaults.commands is not None:
_A = defaults.commands
if not args.tpu_name:
_A = defaults.tpu_name
if not args.tpu_zone:
_A = defaults.tpu_zone
if args.accelerate_version == "dev":
_A = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
_A = """accelerate -U"""
elif isinstance(parse(args.accelerate_version) , snake_case__):
_A = F'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""")
if args.command_file:
with open(args.command_file , """r""") as f:
_A = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , snake_case__):
_A = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
_A = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [F'''pip install {args.accelerate_version}''']
new_cmd += args.command
_A = """; """.join(snake_case__)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
_A = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'''Running {' '.join(snake_case__)}''')
return
subprocess.run(snake_case__)
print("""Successfully setup pod.""")
def snake_case ( ) -> Union[str, Any]:
_A = tpu_command_parser()
_A = parser.parse_args()
tpu_command_launcher(snake_case__)
| 83 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 1 |
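The tpu-config row reduces to joining the startup commands into one shell string and wrapping it in a `gcloud compute tpus tpu-vm ssh` invocation. A sketch of that assembly; the TPU name, zone, and commands below are hypothetical:

def build_gcloud_cmd(tpu_name: str, tpu_zone: str, commands: list,
                     use_alpha: bool = False) -> list:
    # Chain startup commands into one shell string, then wrap it in the
    # `gcloud compute tpus tpu-vm ssh ... --command ... --worker all` argv.
    shell = "; ".join(["cd /usr/share"] + commands)
    cmd = ["gcloud"] + (["alpha"] if use_alpha else [])
    cmd += ["compute", "tpus", "tpu-vm", "ssh", tpu_name,
            "--zone", tpu_zone, "--command", shell, "--worker", "all"]
    return cmd

print(build_gcloud_cmd("my-tpu", "us-central1-b",
                       ["pip install accelerate -U", "accelerate launch train.py"]))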
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Dict = ['''audio_values''', '''audio_mask''']
def __init__( self , lowerCAmelCase_=20_48 , lowerCAmelCase_=1 , lowerCAmelCase_=[16, 16] , lowerCAmelCase_=1_28 , lowerCAmelCase_=4_41_00 , lowerCAmelCase_=86 , lowerCAmelCase_=20_48 , lowerCAmelCase_=0.0 , **lowerCAmelCase_ , ) -> Tuple:
super().__init__(
feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = spectrogram_length
_A = num_channels
_A = patch_size
_A = feature_size // self.patch_size[1]
_A = n_fft
_A = sampling_rate // hop_length_to_sampling_rate
_A = sampling_rate
_A = padding_value
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , ).T
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> np.ndarray:
_A = spectrogram(
lowerCAmelCase_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
_A = log_spec[:, :-1]
_A = log_spec - 20.0
_A = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_A = isinstance(lowerCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray ):
_A = np.asarray(lowerCAmelCase_ , dtype=np.floataa )
elif isinstance(lowerCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_A = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase_ ):
_A = [np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_A = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_A = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_A = np.array(lowerCAmelCase_ ).astype(np.floataa )
# convert into correct format for padding
_A = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_A = np.ones([len(lowerCAmelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_A = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase_ ) ):
_A = audio_features[i]
_A = feature
# return as BatchFeature
if return_attention_mask:
_A = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_A = {"""audio_values""": padded_audio_features}
_A = BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
return encoded_inputs
| 83 | import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(snake_case__): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__):
for image_y in range(snake_case__):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 1 |
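The Mandelbrot row's escape-time logic, restated with plain names. As in the row, the iteration z -> z**2 + c starts from z = c rather than z = 0, and the returned escape fraction doubles as an HSV hue:

import colorsys

def escape_fraction(x: float, y: float, max_step: int) -> float:
    # Iterate z -> z**2 + c for c = x + iy and report how quickly |z| exceeds 2.
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)

def color_coded_rgb(distance: float) -> tuple:
    if distance == 1:  # never escaped: treat as inside the set, paint black
        return (0, 0, 0)
    return tuple(round(c * 255) for c in colorsys.hsv_to_rgb(distance, 1, 1))

print(color_coded_rgb(escape_fraction(-0.6, 0.0, 50)))  # inside -> (0, 0, 0)
print(color_coded_rgb(escape_fraction(0.8, 0.8, 50)))   # fast escape -> reddish hue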
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83 | 1 |
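The final aggregation in the coval row is simple: the CoNLL score is the unweighted mean of the MUC, B-cubed and CEAFe F1 values on a 0-100 scale. A one-function sketch:

def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    # The official CoNLL-2012 score: unweighted mean of the MUC, B-cubed and
    # CEAFe F1 values, on a 0-100 scale (LEA and mention scores are extras).
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100

print(conll_score(0.80, 0.70, 0.75))  # 75.0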
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 83 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
| 83 | 1 |
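The core of the tokenizer row is standard byte-pair encoding. A simplified sketch of the greedy merge loop, omitting the `</w>` end-of-word marker and `@@` continuation suffix handled in the row; the toy merge table below is made up for illustration:

def get_pairs(word: tuple) -> set:
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe(token: str, bpe_ranks: dict) -> str:
    # Repeatedly merge the adjacent symbol pair with the lowest merge rank
    # until no pair in the word appears in the learned merge table.
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = get_pairs(word)
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}
print(bpe("lower", ranks))  # "low e r"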
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a :
"""simple docstring"""
lowerCamelCase :CommonSchedulerState
# setable values
lowerCamelCase :jnp.ndarray
lowerCamelCase :jnp.ndarray
lowerCamelCase :Optional[int] = None
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
return cls(common=lowerCAmelCase_ , init_noise_sigma=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :DDPMSchedulerState
class a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[str] = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase :jnp.dtype
@property
def UpperCAmelCase ( self ) -> Any:
return True
@register_to_config
def __init__( self , lowerCAmelCase_ = 10_00 , lowerCAmelCase_ = 0.0001 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = "linear" , lowerCAmelCase_ = None , lowerCAmelCase_ = "fixed_small" , lowerCAmelCase_ = True , lowerCAmelCase_ = "epsilon" , lowerCAmelCase_ = jnp.floataa , ) -> Tuple:
_A = dtype
def UpperCAmelCase ( self , lowerCAmelCase_ = None ) -> DDPMSchedulerState:
if common is None:
_A = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A = jnp.array(1.0 , dtype=self.dtype )
_A = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase_ , init_noise_sigma=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> jnp.ndarray:
return sample
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = () ) -> DDPMSchedulerState:
_A = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A = (jnp.arange(0 , lowerCAmelCase_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
_A = state.common.alphas_cumprod[t]
_A = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A = jnp.clip(lowerCAmelCase_ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A = jnp.log(jnp.clip(lowerCAmelCase_ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
_A = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A = variance
_A = state.common.betas[t]
_A = (predicted_variance + 1) / 2
_A = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A = timestep
if key is None:
_A = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A = jnp.split(lowerCAmelCase_ , sample.shape[1] , axis=1 )
else:
_A = None
# 1. compute alphas, betas
_A = state.common.alphas_cumprod[t]
_A = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A = 1 - alpha_prod_t
_A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A = model_output
elif self.config.prediction_type == "v_prediction":
_A = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
"""or `v_prediction` for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A = jnp.clip(lowerCAmelCase_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A = jax.random.split(lowerCAmelCase_ , num=1 )
_A = jax.random.normal(lowerCAmelCase_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase_ , lowerCAmelCase_ , predicted_variance=lowerCAmelCase_ ) ** 0.5) * noise
_A = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase_ , state=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> jnp.ndarray:
return add_noise_common(state.common , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 83 | _SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
| 83 | 1 |
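The Flax scheduler earlier in this row implements one DDPM ancestral sampling step. A numpy sketch of that step in the epsilon parameterization, using formulas (7) and (15) of the DDPM paper (https://arxiv.org/pdf/2006.11239.pdf); the array names are illustrative:

import numpy as np

def ddpm_step(model_output, sample, t, alphas, alphas_cumprod, betas, rng):
    # One ancestral sampling step x_t -> x_{t-1}, mirroring the row above.
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    # predicted x_0 recovered from the predicted noise (formula 15)
    pred_x0 = (sample - (1 - a_t) ** 0.5 * model_output) / a_t ** 0.5
    # posterior mean coefficients (formula 7)
    coef_x0 = (a_prev ** 0.5 * betas[t]) / (1 - a_t)
    coef_xt = (alphas[t] ** 0.5 * (1 - a_prev)) / (1 - a_t)
    mean = coef_x0 * pred_x0 + coef_xt * sample
    # posterior variance; no noise is added on the final step (t == 0)
    variance = (1 - a_prev) / (1 - a_t) * betas[t]
    noise = rng.standard_normal(sample.shape) if t > 0 else 0.0
    return mean + variance ** 0.5 * noise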
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a ( __lowerCAmelCase ):
"""simple docstring"""
@slow
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
_A = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_A = bertabert.config.encoder.vocab_size
_A = tokenizer.sep_token_id
_A = tokenizer.cls_token_id
_A = 1_28
_A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_A = train_dataset.select(range(32 ) )
_A = val_dataset.select(range(16 ) )
_A = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_A = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCAmelCase_ , max_length=5_12 )
_A = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCAmelCase_ , max_length=1_28 )
_A = inputs.input_ids
_A = inputs.attention_mask
_A = outputs.input_ids
_A = outputs.input_ids.copy()
_A = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_A = outputs.attention_mask
assert all(len(lowerCAmelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(lowerCAmelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase_ ):
_A = pred.label_ids
_A = pred.predictions
# all unnecessary tokens are removed
_A = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_A = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase_ ) )] ) / len(lowerCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
_A = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_A = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_A = self.get_auto_remove_tmp_dir()
_A = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase_ , per_device_train_batch_size=lowerCAmelCase_ , per_device_eval_batch_size=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , evaluation_strategy="""steps""" , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_A = SeqaSeqTrainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , )
# start training
trainer.train()
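# The mapping function above replaces padding ids in the labels with -100 so the
# cross-entropy loss ignores them. A standalone sketch of that step; the pad id of 0
# is an assumption (the real code reads tokenizer.pad_token_id).
PAD_TOKEN_ID = 0
def mask_labels(label_ids):
    # -100 is the default ignore_index of torch.nn.CrossEntropyLoss
    return [[-100 if tok == PAD_TOKEN_ID else tok for tok in seq] for seq in label_ids]
assert mask_labels([[5, 7, 0, 0]]) == [[5, 7, -100, -100]]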
| 83 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
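# The try/except around is_torch_available() above simply omits the torch-only symbols
# from the import structure when torch is missing; transformers' _LazyModule then
# resolves names on first access. A simplified stand-in for that lazy-export idea,
# using PEP 562 module-level __getattr__ instead of _LazyModule (illustrative only):
import importlib
_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)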
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_SCREAMING_SNAKE_CASE = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> int:
_A = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
_A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_A = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_ , repo_id="""test-model-flax""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def UpperCAmelCase ( self ) -> List[str]:
_A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_A = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase_ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def snake_case ( snake_case__ :int , snake_case__ :Dict) -> Optional[Any]:
_A = True
_A = flatten_dict(modela.params)
_A = flatten_dict(modela.params)
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key])) > 1E-4:
_A = False
return models_are_equal
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
_A = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A = FlaxBertModel(lowerCAmelCase_ )
_A = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Any:
_A = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A = FlaxBertModel(lowerCAmelCase_ )
_A = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , max_shard_size="""10KB""" )
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> int:
_A = """bert"""
_A = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = """bert"""
_A = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
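# The staging tests above compare two Flax models leaf-by-leaf after a hub round-trip.
# A sketch of that comparison, assuming flax and numpy are installed; unlike the test's
# signed sum, this uses an absolute sum, which is the slightly stricter variant.
import numpy as np
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
def params_close(params_a, params_b, tol=1e-3):
    flat_a = flatten_dict(unfreeze(params_a))
    flat_b = flatten_dict(unfreeze(params_b))
    # every leaf tensor must match within the tolerance
    return all(np.abs(flat_a[k] - flat_b[k]).sum() <= tol for k in flat_a)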
| 83 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
| 83 | 1 |
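# The tool above wraps the usual tokenize -> generate -> decode loop around a BART
# summarization checkpoint. Roughly equivalent direct usage through the pipeline API
# (the model name comes from the class attribute above; this triggers a download):
from transformers import pipeline
summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
print(summarizer("Long English text to condense ...")[0]["summary_text"])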
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 | 1 |
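# The conversion script above rewrites checkpoint keys with (old, new) substring pairs
# before loading the state dict. A self-contained sketch of that rewrite on a plain dict:
def rename_state_dict(sd, rename_pairs):
    new_sd = {}
    for key, value in sd.items():
        for old, new in rename_pairs:
            key = key.replace(old, new)
        new_sd[key] = value
    return new_sd
sd = {"bert.bert.embeddings.weight": 0}
assert rename_state_dict(sd, [("bert.bert", "visual_bert")]) == {"visual_bert.embeddings.weight": 0}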
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ = None ) -> None:
if components is None:
_A = []
_A = list(lowerCAmelCase_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase_ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase_ ) -> Vector:
_A = len(self )
if size == len(lowerCAmelCase_ ):
_A = [self.__components[i] + other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , lowerCAmelCase_ ) -> Vector:
_A = len(self )
if size == len(lowerCAmelCase_ ):
_A = [self.__components[i] - other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase_ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase_ ) -> float:
...
def __mul__( self , lowerCAmelCase_ ) -> float | Vector:
if isinstance(lowerCAmelCase_ , (float, int) ):
_A = [c * other for c in self.__components]
return Vector(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(self ) == len(lowerCAmelCase_ ):
_A = len(self )
_A = [self.__components[i] * other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return sum(lowerCAmelCase_ )
else: # error case
raise Exception("""invalid operand!""" )
def UpperCAmelCase ( self ) -> Vector:
return Vector(self.__components )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> float:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
_A = value
def UpperCAmelCase ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_A = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> float:
_A = self * other
_A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def snake_case ( snake_case__ :int) -> Vector:
assert isinstance(snake_case__ , snake_case__)
return Vector([0] * dimension)
def snake_case ( snake_case__ :int , snake_case__ :int) -> Vector:
assert isinstance(snake_case__ , snake_case__) and (isinstance(snake_case__ , snake_case__))
_A = [0] * dimension
_A = 1
return Vector(snake_case__)
def snake_case ( snake_case__ :float , snake_case__ :Vector , snake_case__ :Vector) -> Vector:
assert (
isinstance(snake_case__ , snake_case__)
and isinstance(snake_case__ , snake_case__)
and (isinstance(snake_case__ , (int, float)))
)
return x * scalar + y
def snake_case ( snake_case__ :int , snake_case__ :int , snake_case__ :int) -> Vector:
random.seed(snake_case__)
_A = [random.randint(snake_case__ , snake_case__) for _ in range(snake_case__)]
return Vector(snake_case__)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_A = matrix
_A = w
_A = h
def __str__( self ) -> str:
_A = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_A = []
for i in range(self.__height ):
_A = [
self.__matrix[i][j] + other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , lowerCAmelCase_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_A = []
for i in range(self.__height ):
_A = [
self.__matrix[i][j] - other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , lowerCAmelCase_ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase_ ) -> Vector:
...
def __mul__( self , lowerCAmelCase_ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # matrix-vector
if len(lowerCAmelCase_ ) == self.__width:
_A = zero_vector(self.__height )
for i in range(self.__height ):
_A = [
self.__matrix[i][j] * other.component(lowerCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase_ , sum(lowerCAmelCase_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase_ , (int, float) ): # matrix-scalar
_A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
return None
def UpperCAmelCase ( self ) -> int:
return self.__height
def UpperCAmelCase ( self ) -> int:
return self.__width
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
_A = value
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase_ ) ):
_A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase_ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise Exception("""Indices out of bounds""" )
def UpperCAmelCase ( self ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_A = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase_ ) for y in range(self.__width )
]
return sum(lowerCAmelCase_ )
def snake_case ( snake_case__ :int) -> Matrix:
_A = [[0] * n for _ in range(snake_case__)]
return Matrix(snake_case__ , snake_case__ , snake_case__)
def snake_case ( snake_case__ :int , snake_case__ :int , snake_case__ :int , snake_case__ :int) -> Matrix:
random.seed(snake_case__)
_A = [
[random.randint(snake_case__ , snake_case__) for _ in range(snake_case__)] for _ in range(snake_case__)
]
return Matrix(snake_case__ , snake_case__ , snake_case__)
| 83 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 1 |
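# The angle method of the Vector class above implements cos(theta) = (x . y) / (|x| |y|).
# A standalone check of that formula with plain lists:
import math
def angle(x, y, deg=False):
    dot = sum(a * b for a, b in zip(x, y))
    norm = math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y))
    theta = math.acos(dot / norm)
    return math.degrees(theta) if deg else theta
assert round(angle([1, 0], [0, 1], deg=True)) == 90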
def snake_case ( snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :Optional[Any] , snake_case__ :str) -> Dict:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_A = mf_knapsack(i - 1 , snake_case__ , snake_case__ , snake_case__)
else:
_A = max(
mf_knapsack(i - 1 , snake_case__ , snake_case__ , snake_case__) , mf_knapsack(i - 1 , snake_case__ , snake_case__ , j - wt[i - 1]) + val[i - 1] , )
_A = val
return f[i][j]
def snake_case ( snake_case__ :List[str] , snake_case__ :List[Any] , snake_case__ :List[Any] , snake_case__ :List[Any]) -> List[str]:
_A = [[0] * (w + 1) for _ in range(n + 1)]
for i in range(1 , n + 1):
for w_ in range(1 , w + 1):
if wt[i - 1] <= w_:
_A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_])
else:
_A = dp[i - 1][w_]
return dp[n][w_], dp
def snake_case ( snake_case__ :int , snake_case__ :list , snake_case__ :list) -> Dict:
if not (isinstance(snake_case__ , (list, tuple)) and isinstance(snake_case__ , (list, tuple))):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""")
_A = len(snake_case__)
if num_items != len(snake_case__):
_A = (
"""The number of weights must be the same as the number of values.\n"""
F'''But got {num_items} weights and {len(snake_case__)} values'''
)
raise ValueError(snake_case__)
for i in range(snake_case__):
if not isinstance(wt[i] , snake_case__):
_A = (
"""All weights must be integers but got weight of """
F'''type {type(wt[i])} at index {i}'''
)
raise TypeError(snake_case__)
_A , _A = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = set()
_construct_solution(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
return optimal_val, example_optional_set
def snake_case ( snake_case__ :list , snake_case__ :list , snake_case__ :int , snake_case__ :int , snake_case__ :set) -> List[str]:
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(snake_case__ , snake_case__ , i - 1 , snake_case__ , snake_case__)
else:
optimal_set.add(snake_case__)
_construct_solution(snake_case__ , snake_case__ , i - 1 , j - wt[i - 1] , snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = [3, 2, 4, 4]
_SCREAMING_SNAKE_CASE = [4, 3, 2, 3]
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 6
_SCREAMING_SNAKE_CASE = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 83 | def snake_case ( snake_case__ :int = 1_000_000) -> int:
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , snake_case__)))
_A = [float(snake_case__) for n in range(limit + 1)]
for p in primes:
for n in range(snake_case__ , limit + 1 , snake_case__):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 83 | 1 |
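# The solution above sieves Euler's totient by multiplying phi[n] by (1 - 1/p) for each
# prime p dividing n; sum(phi[2:]) then counts reduced proper fractions with denominator
# at most `limit` (Project Euler 72). The same sieve in exact integer arithmetic:
def totient_sum(limit: int) -> int:
    phi = list(range(limit + 1))           # phi[n] starts as n
    for p in range(2, limit + 1):
        if phi[p] == p:                    # p is prime: still untouched by the sieve
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p      # phi[n] *= (1 - 1/p), exactly
    return sum(phi[2:])
assert totient_sum(8) == 21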
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
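# The integration test above checks a logits slice of microsoft/deberta-v2-xlarge.
# Minimal inference along the same lines via the generic Auto classes (an assumption;
# this also triggers a multi-gigabyte download):
import torch
from transformers import AutoModel, AutoTokenizer
tok = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
model = AutoModel.from_pretrained("microsoft/deberta-v2-xlarge")
with torch.no_grad():
    out = model(**tok("DeBERTa improves BERT.", return_tensors="pt"))
print(out.last_hidden_state.shape)  # (batch, seq_len, hidden_size)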
import heapq
def snake_case ( snake_case__ :dict) -> set[int]:
_A = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)])
# chosen_vertices = set of chosen vertices
_A = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A = heapq.heappop(snake_case__)[1][0]
chosen_vertices.add(snake_case__)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_A = elem[1][1].index(snake_case__)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 83 | def snake_case ( snake_case__ :int , snake_case__ :int) -> int:
return int(input_a == input_a == 0)
def snake_case ( ) -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 1 |
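# NOR is functionally complete, so the gate above suffices to build the other basic
# gates; a quick demonstration using the same truth function:
def nor(a: int, b: int) -> int:
    return int(a == b == 0)
def not_(a: int) -> int:
    return nor(a, a)
def or_(a: int, b: int) -> int:
    return not_(nor(a, b))
def and_(a: int, b: int) -> int:
    return nor(not_(a), not_(b))
assert [and_(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]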
def snake_case ( snake_case__ :list , snake_case__ :list , snake_case__ :int , snake_case__ :int , snake_case__ :int) -> int:
if index == number_of_items:
return 0
_A = 0
_A = 0
_A = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__ , index + 1)
if weights[index] <= max_weight:
_A = values[index] + knapsack(
snake_case__ , snake_case__ , snake_case__ , max_weight - weights[index] , index + 1)
return max(snake_case__ , snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
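# The plain recursion above re-solves overlapping subproblems; memoizing on
# (index, remaining weight) brings it down to O(n * max_weight). A sketch with
# functools.lru_cache (illustrative names, not the original API):
from functools import lru_cache
def knapsack_memo(weights, values, max_weight):
    @lru_cache(maxsize=None)
    def best(i, cap):
        if i == len(weights):
            return 0
        skip = best(i + 1, cap)
        if weights[i] <= cap:
            return max(skip, values[i] + best(i + 1, cap - weights[i]))
        return skip
    return best(0, max_weight)
assert knapsack_memo((1, 2, 4, 5), (5, 4, 8, 6), 5) == 13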
| 83 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
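# A minimal, self-contained sketch of what the two box helpers above do:
# scale xyxy boxes back to the original image size, then clamp them to
# the image bounds. The names below are illustrative, not a public API.
import torch

def scale_boxes_demo(boxes, scale_yx):
    boxes = boxes.clone()
    boxes[:, 0::2] *= scale_yx[1]  # x1, x2 use the x scale factor
    boxes[:, 1::2] *= scale_yx[0]  # y1, y2 use the y scale factor
    return boxes

demo_boxes = scale_boxes_demo(torch.tensor([[10.0, 20.0, 700.0, 500.0]]), torch.tensor([0.5, 0.5]))
demo_boxes[:, 0].clamp_(min=0, max=320)  # clamp x1 to the image width
demo_boxes[:, 1].clamp_(min=0, max=240)  # clamp y1 to the image height
demo_boxes[:, 2].clamp_(min=0, max=320)
demo_boxes[:, 3].clamp_(min=0, max=240)
print(demo_boxes)  # tensor([[  5.,  10., 320., 240.]])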
| 83 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def snake_case ( snake_case__ :int , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :int , snake_case__ :int=True , snake_case__ :Tuple="pt") -> List[str]:
_A = {"""add_prefix_space""": True} if isinstance(snake_case__ , snake_case__) and not line.startswith(""" """) else {}
_A = padding_side
return tokenizer(
[line] , max_length=snake_case__ , padding="""max_length""" if pad_to_max_length else None , truncation=snake_case__ , return_tensors=snake_case__ , add_special_tokens=snake_case__ , **snake_case__ , )
def snake_case ( snake_case__ :Optional[int] , snake_case__ :int , snake_case__ :Optional[Any]=None , ) -> Tuple:
_A = input_ids.ne(snake_case__).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
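# Hedged usage sketch for the column-trimming helper above: columns that
# are padding in every row are dropped, so a batch is never wider than
# its longest real sequence.
import torch

pad_token_id = 0
demo_ids = torch.tensor([[5, 6, 0, 0],
                         [7, 0, 0, 0]])
keep_column_mask = demo_ids.ne(pad_token_id).any(dim=0)  # tensor([True, True, False, False])
print(demo_ids[:, keep_column_mask])                     # tensor([[5, 6], [7, 0]])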
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="train" , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="" , ) -> Union[str, Any]:
super().__init__()
_A = Path(lowerCAmelCase_ ).joinpath(type_path + """.source""" )
_A = Path(lowerCAmelCase_ ).joinpath(type_path + """.target""" )
_A = self.get_char_lens(self.src_file )
_A = max_source_length
_A = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
_A = tokenizer
_A = prefix
if n_obs is not None:
_A = self.src_lens[:n_obs]
_A = src_lang
_A = tgt_lang
def __len__( self ) -> Optional[Any]:
return len(self.src_lens )
def __getitem__( self , lowerCAmelCase_ ) -> Dict[str, torch.Tensor]:
_A = index + 1 # linecache starts at 1
_A = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase_ ).rstrip("""\n""" )
_A = linecache.getline(str(self.tgt_file ) , lowerCAmelCase_ ).rstrip("""\n""" )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_A = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase_ ) else self.tokenizer
)
_A = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase_ ) else self.tokenizer
_A = encode_line(lowerCAmelCase_ , lowerCAmelCase_ , self.max_source_length , """right""" )
_A = encode_line(lowerCAmelCase_ , lowerCAmelCase_ , self.max_target_length , """right""" )
_A = source_inputs["""input_ids"""].squeeze()
_A = target_inputs["""input_ids"""].squeeze()
_A = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Optional[int]:
return [len(x ) for x in Path(lowerCAmelCase_ ).open().readlines()]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Dict[str, torch.Tensor]:
_A = torch.stack([x["""input_ids"""] for x in batch] )
_A = torch.stack([x["""attention_mask"""] for x in batch] )
_A = torch.stack([x["""decoder_input_ids"""] for x in batch] )
_A = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase_ )
else self.tokenizer.pad_token_id
)
_A = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase_ )
else self.tokenizer.pad_token_id
)
_A = trim_batch(lowerCAmelCase_ , lowerCAmelCase_ )
_A , _A = trim_batch(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_A = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
_SCREAMING_SNAKE_CASE = getLogger(__name__)
def snake_case ( snake_case__ :List[List]) -> Optional[Any]:
return list(itertools.chain.from_iterable(snake_case__))
def snake_case ( snake_case__ :str) -> None:
_A = get_git_info()
save_json(snake_case__ , os.path.join(snake_case__ , """git_log.json"""))
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Union[str, Any] , snake_case__ :Dict=4 , **snake_case__ :Union[str, Any]) -> Dict:
with open(snake_case__ , """w""") as f:
json.dump(snake_case__ , snake_case__ , indent=snake_case__ , **snake_case__)
def snake_case ( snake_case__ :List[str]) -> Optional[Any]:
with open(snake_case__) as f:
return json.load(snake_case__)
def snake_case ( ) -> Dict:
_A = git.Repo(search_parent_directories=snake_case__)
_A = {
"""repo_id""": str(snake_case__),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def snake_case ( snake_case__ :Callable , snake_case__ :Iterable) -> List:
return list(map(snake_case__ , snake_case__))
def snake_case ( snake_case__ :str , snake_case__ :List[Any]) -> Dict:
with open(snake_case__ , """wb""") as f:
return pickle.dump(snake_case__ , snake_case__)
def snake_case ( snake_case__ :int) -> Dict:
def remove_articles(text :List[str]):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , text)
def white_space_fix(text :List[Any]):
return " ".join(text.split())
def remove_punc(text :List[Any]):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text :Optional[Any]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
def snake_case ( snake_case__ :str , snake_case__ :Dict) -> int:
_A = normalize_answer(snake_case__).split()
_A = normalize_answer(snake_case__).split()
_A = Counter(snake_case__) & Counter(snake_case__)
_A = sum(common.values())
if num_same == 0:
return 0
_A = 1.0 * num_same / len(snake_case__)
_A = 1.0 * num_same / len(snake_case__)
_A = (2 * precision * recall) / (precision + recall)
return fa
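# Worked example of the token-level F1 above, under the SQuAD-style
# normalization (lowercase, strip articles and punctuation): with a
# prediction "the cat sat" and gold "a cat sat down", the normalized
# token bags are {cat, sat} and {cat, sat, down}, so num_same = 2,
# precision = 2/2, recall = 2/3, and F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.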
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Optional[Any]) -> Optional[int]:
return normalize_answer(snake_case__) == normalize_answer(snake_case__)
def snake_case ( snake_case__ :List[str] , snake_case__ :List[str]) -> Dict:
assert len(snake_case__) == len(snake_case__)
_A = 0
for hypo, pred in zip(snake_case__ , snake_case__):
em += exact_match_score(snake_case__ , snake_case__)
if len(snake_case__) > 0:
em /= len(snake_case__)
return {"em": em}
def snake_case ( snake_case__ :List[Any]) -> Tuple:
return model_prefix.startswith("""rag""")
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple , snake_case__ :Optional[Any]) -> Dict:
_A = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_A = """dropout_rate"""
for p in extra_params:
if getattr(snake_case__ , snake_case__ , snake_case__):
if not hasattr(snake_case__ , snake_case__) and not hasattr(snake_case__ , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(snake_case__))
delattr(snake_case__ , snake_case__)
continue
_A = p if hasattr(snake_case__ , snake_case__) else equivalent_param[p]
setattr(snake_case__ , snake_case__ , getattr(snake_case__ , snake_case__))
delattr(snake_case__ , snake_case__)
return hparams, config
| 83 |
from collections import defaultdict
def snake_case ( snake_case__ :int) -> int:
_A = 1
_A = True
for v in tree[start]:
if v not in visited:
ret += dfs(snake_case__)
if ret % 2 == 0:
cuts.append(snake_case__)
return ret
def snake_case ( ) -> Any:
dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
_SCREAMING_SNAKE_CASE = defaultdict(list)
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
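# The DFS above computes subtree sizes: every non-root subtree of even
# size can be cut off its parent edge. A self-contained sketch of the
# same idea (function and variable names here are illustrative):
from collections import defaultdict

def even_tree_cuts(edge_list):
    adj = defaultdict(list)
    for u, v in edge_list:
        adj[u].append(v)
        adj[v].append(u)
    seen = {1}
    cuts = 0
    def dfs(node):
        nonlocal cuts
        size = 1
        for nxt in adj[node]:
            if nxt not in seen:
                seen.add(nxt)
                size += dfs(nxt)
        if size % 2 == 0:
            cuts += 1
        return size
    dfs(1)
    return cuts - 1  # the whole tree is counted once; it is not a cut

demo_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
print(even_tree_cuts(demo_edges))  # 2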
| 83 | 1 |
def snake_case ( snake_case__ :int , snake_case__ :int) -> str:
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""")
_A = str(bin(snake_case__))
binary_number += "0" * shift_amount
return binary_number
def snake_case ( snake_case__ :int , snake_case__ :int) -> str:
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""")
_A = str(bin(snake_case__))[2:]
if shift_amount >= len(snake_case__):
return "0b0"
_A = binary_number[: len(snake_case__) - shift_amount]
return "0b" + shifted_binary_number
def snake_case ( snake_case__ :int , snake_case__ :int) -> str:
if number >= 0: # Get binary representation of positive number
_A = """0""" + str(bin(snake_case__)).strip("""-""")[2:]
else: # Get binary (2's complement) representation of negative number
_A = len(bin(snake_case__)[3:]) # Find 2's complement of number
_A = bin(abs(snake_case__) - (1 << binary_number_length))[3:]
_A = (
"""1""" + """0""" * (binary_number_length - len(snake_case__)) + binary_number
)
if shift_amount >= len(snake_case__):
return "0b" + binary_number[0] * len(snake_case__)
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(snake_case__) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
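# Quick sanity checks for the three shift helpers above (the snippet
# names every function snake_case; descriptive names here are mine):
# logical left shift of 17 by 2   -> '0b1000100'  (17 << 2 == 68)
# logical right shift of 17 by 2  -> '0b100'      (17 >> 2 == 4)
# arithmetic right shift of -17 by 2 keeps the sign bit and yields
# '0b111011', i.e. -5 in 6-bit two's complement (matching -17 >> 2).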
| 83 |
import heapq
def snake_case ( snake_case__ :dict) -> set[int]:
_A = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)])
# chosen_vertices = set of chosen vertices
_A = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A = heapq.heappop(snake_case__)[1][0]
chosen_vertices.add(snake_case__)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_A = elem[1][1].index(snake_case__)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
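# Trace of the run above: the heap stores negated degrees, so vertex 2
# (rank 3, smallest key on ties) is popped first; after each pick the
# neighbours' ranks are rebuilt and the loop ends once every remaining
# rank is 0. For this sample graph the printed cover is {0, 1, 2, 4}.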
| 83 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Dict = '''biogpt'''
def __init__( self , lowerCAmelCase_=4_23_84 , lowerCAmelCase_=10_24 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=40_96 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ) -> Union[str, Any]:
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = scale_embedding
_A = use_cache
_A = layerdrop
_A = activation_dropout
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
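# Hedged usage sketch: with no arguments the config reproduces the
# defaults in the signature above; any field can be overridden.
# from transformers import BioGptConfig
# config = BioGptConfig()                      # vocab_size=42384, hidden_size=1024, ...
# small = BioGptConfig(num_hidden_layers=12)   # override a single field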
| 83 |
import math
import unittest
def snake_case ( snake_case__ :int) -> bool:
assert isinstance(snake_case__ , snake_case__) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
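# Why the loop may step by 6: every prime p > 3 satisfies p % 6 in
# {1, 5}, since 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible
# by 3, so testing i and i + 2 for i = 5, 11, 17, ... covers all
# candidate divisors up to sqrt(n). For n = 97, only i = 5 (and 7) is
# tried, neither divides 97, and 97 is correctly reported prime.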
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '<<<<<<< This should probably be modified because it mentions: '
_SCREAMING_SNAKE_CASE = '=======\n>>>>>>>\n'
_SCREAMING_SNAKE_CASE = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def snake_case ( snake_case__ :Namespace) -> int:
return ConvertCommand(args.tfds_path , args.datasets_directory)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Any:
_A = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> Any:
_A = get_logger("""datasets-cli/converting""" )
_A = tfds_path
_A = datasets_directory
def UpperCAmelCase ( self ) -> List[str]:
if os.path.isdir(self._tfds_path ):
_A = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_A = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
_A = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
_A = []
_A = []
_A = {}
if os.path.isdir(self._tfds_path ):
_A = os.listdir(lowerCAmelCase_ )
else:
_A = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if not os.path.isfile(lowerCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f:
_A = f.readlines()
_A = []
_A = False
_A = False
_A = []
for line in lines:
_A = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_A = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
_A = """"""
continue
elif "from absl import logging" in out_line:
_A = """from datasets import logging\n"""
elif "getLogger" in out_line:
_A = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_A = True
_A = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase_ ) + """\n""" )
out_lines.append(lowerCAmelCase_ )
out_lines.append(lowerCAmelCase_ )
continue
else:
for pattern, replacement in TO_CONVERT:
_A = re.sub(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_A = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowerCAmelCase_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
_A = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_A = True
out_lines.append(lowerCAmelCase_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_A = f_name.replace(""".py""" , """""" )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase_ )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase_ )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(lowerCAmelCase_ )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
_A = os.path.basename(lowerCAmelCase_ )
_A = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCAmelCase_ , lowerCAmelCase_ )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE = 256
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = ['''melgan''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> None:
super().__init__()
# From MELGAN
_A = math.log(1E-5 ) # Matches MelGAN training.
_A = 4.0 # Largest value for most examples
_A = 1_28
self.register_modules(
notes_encoder=lowerCAmelCase_ , continuous_encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , melgan=lowerCAmelCase_ , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=(-1.0, 1.0) , lowerCAmelCase_=False ) -> str:
_A , _A = output_range
if clip:
_A = torch.clip(lowerCAmelCase_ , self.min_value , self.max_value )
# Scale to [0, 1].
_A = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=(-1.0, 1.0) , lowerCAmelCase_=False ) -> Optional[Any]:
_A , _A = input_range
_A = torch.clip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if clip else outputs
# Scale to [0, 1].
_A = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = input_tokens > 0
_A , _A = self.notes_encoder(
encoder_input_tokens=lowerCAmelCase_ , encoder_inputs_mask=lowerCAmelCase_ )
_A , _A = self.continuous_encoder(
encoder_inputs=lowerCAmelCase_ , encoder_inputs_mask=lowerCAmelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = noise_time
if not torch.is_tensor(lowerCAmelCase_ ):
_A = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
_A = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_A = self.decoder(
encodings_and_masks=lowerCAmelCase_ , decoder_input_tokens=lowerCAmelCase_ , decoder_noise_time=lowerCAmelCase_ )
return logits
@torch.no_grad()
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = True , lowerCAmelCase_ = "numpy" , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowerCAmelCase_ )}.''' )
_A = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
_A = np.zeros([1, 0, self.n_dims] , np.float32 )
_A = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowerCAmelCase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowerCAmelCase_ ):
if i == 0:
_A = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_A = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowerCAmelCase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_A = ones
_A = self.scale_features(
lowerCAmelCase_ , output_range=[-1.0, 1.0] , clip=lowerCAmelCase_ )
_A = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowerCAmelCase_ , continuous_mask=lowerCAmelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_A = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowerCAmelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_A = self.decode(
encodings_and_masks=lowerCAmelCase_ , input_tokens=lowerCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_A = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A = self.scale_to_features(lowerCAmelCase_ , input_range=[-1.0, 1.0] )
_A = mel[:1]
_A = mel.cpu().float().numpy()
_A = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase_ , lowerCAmelCase_ )
logger.info("""Generated segment""" , lowerCAmelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
_A = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
_A = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowerCAmelCase_ )
| 83 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = {"""image""": image, """question""": question}
else:
_A = image
_A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any:
_A = load_image(inputs["""image"""] )
_A = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase_ )
return model_inputs
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = self.model(**lowerCAmelCase_ )
return model_outputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
_A = self.model.config.num_labels
if self.framework == "pt":
_A = model_outputs.logits.sigmoid()[0]
_A , _A = probs.topk(lowerCAmelCase_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_A = scores.tolist()
_A = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
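# Hedged usage sketch for the pipeline above (the checkpoint is chosen
# by the task default; scores below are illustrative):
# from transformers import pipeline
# vqa = pipeline("visual-question-answering")
# vqa(image="cats.png", question="How many cats are there?", top_k=2)
# # -> [{'score': 0.91, 'answer': '2'}, {'score': 0.05, 'answer': '1'}]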
| 83 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_SCREAMING_SNAKE_CASE = 'sshleifer/bart-tiny-random'
_SCREAMING_SNAKE_CASE = 'patrickvonplaten/t5-tiny-random'
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Any:
return AutoConfig.from_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A , *_A = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase ( self ) -> Any:
_A , *_A = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A , *_A = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase ( self ) -> Dict:
_A , *_A = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=lowerCAmelCase_ , d=lowerCAmelCase_ )
| 83 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
| 83 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = question_encoder
_A = generator
_A = self.question_encoder
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
if os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , """question_encoder_tokenizer""" )
_A = os.path.join(lowerCAmelCase_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(lowerCAmelCase_ )
self.generator.save_pretrained(lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop("""config""" , lowerCAmelCase_ )
if config is None:
_A = RagConfig.from_pretrained(lowerCAmelCase_ )
_A = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_A = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=lowerCAmelCase_ , generator=lowerCAmelCase_ )
def __call__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
return self.current_tokenizer(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]:
return self.generator.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return self.generator.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.question_encoder
def UpperCAmelCase ( self ) -> Any:
_A = self.generator
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "longest" , lowerCAmelCase_ = None , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> BatchEncoding:
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , lowerCAmelCase_ , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = labels["""input_ids"""]
return model_inputs
| 83 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''speech_to_text'''
lowerCamelCase :List[str] = ['''past_key_values''']
lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 83 | 1 |
def snake_case ( snake_case__ :Optional[Any]) -> List[Any]:
_A = 0
_A = len(snake_case__)
for i in range(n - 1):
for j in range(i + 1 , snake_case__):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def snake_case ( snake_case__ :int) -> Optional[Any]:
if len(snake_case__) <= 1:
return arr, 0
_A = len(snake_case__) // 2
_A = arr[0:mid]
_A = arr[mid:]
_A , _A = count_inversions_recursive(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
_A , _A = _count_cross_inversions(snake_case__ , snake_case__)
_A = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def snake_case ( snake_case__ :Tuple , snake_case__ :Dict) -> Dict:
_A = []
_A = _A = _A = 0
while i < len(snake_case__) and j < len(snake_case__):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(snake_case__) - i
r.append(q[j])
j += 1
else:
r.append(p[i])
i += 1
if i < len(snake_case__):
r.extend(p[i:])
else:
r.extend(q[j:])
return r, num_inversion
def snake_case ( ) -> Optional[Any]:
_A = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , snake_case__)
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , snake_case__)
# an empty list should also have zero inversions
_A = []
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , snake_case__)
if __name__ == "__main__":
main()
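# Worked check of the divide-and-conquer count on the sample array:
# [10, 2, 1, 5, 5, 2, 11] splits into [10, 2, 1] (3 inversions) and
# [5, 5, 2, 11] (2 inversions); merging the sorted halves finds 3 cross
# pairs (10 > 2, 10 > 5, 10 > 5), giving 3 + 2 + 3 = 8, as asserted.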
| 83 |
from __future__ import annotations
from collections.abc import Callable
def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
_A = x_start
_A = fnc(snake_case__)
_A = 0.0
for _ in range(snake_case__):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_A = (x_end - x_start) / steps + xa
_A = fnc(snake_case__)
area += abs(fxa + fxa) * (xa - xa) / 2
# Increment step
_A = xa
_A = fxa
return area
if __name__ == "__main__":
def snake_case ( snake_case__ :Tuple) -> List[str]:
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_SCREAMING_SNAKE_CASE = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
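# Sanity check against the closed form: the x^3 term is odd and cancels
# over [-5, 5], so the exact area is the integral of x^2, namely
# 2 * 5**3 / 3 = 250 / 3 ~ 83.33; the printed estimates should converge
# to that value as the step count grows.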
| 83 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = (DDPMScheduler,)
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
_A = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**lowerCAmelCase_ )
return config
def UpperCAmelCase ( self ) -> int:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def UpperCAmelCase ( self ) -> Dict:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = len(lowerCAmelCase_ )
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A = pred_prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A = scheduler_class(**lowerCAmelCase_ )
_A = len(lowerCAmelCase_ )
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A = pred_prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase ( self ) -> Dict:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A = -1
else:
_A = timesteps[i + 1]
_A = scheduler.previous_timestep(lowerCAmelCase_ )
_A = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = [1_00, 87, 50, 1, 0]
_A = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 83 |
import numpy as np
import qiskit
def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str:
_A = np.random.default_rng(seed=snake_case__)
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_A = 6 * key_len
# Measurement basis for Alice's qubits.
_A = rng.integers(2 , size=snake_case__)
# The set of states Alice will prepare.
_A = rng.integers(2 , size=snake_case__)
# Measurement basis for Bob's qubits.
_A = rng.integers(2 , size=snake_case__)
# Quantum Circuit to simulate BB84
_A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""")
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(snake_case__):
if alice_state[index] == 1:
bbaa_circ.x(snake_case__)
if alice_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(snake_case__):
if bob_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_A = qiskit.Aer.get_backend("""aer_simulator""")
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__)
# Returns the result of measurement.
_A = job.result().get_counts(snake_case__).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_A = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case__ , snake_case__ , snake_case__)
if alice_basis_bit == bob_basis_bit
])
# Get final key. Pad with 0 if too short, otherwise truncate.
_A = gen_key[:key_len] if len(snake_case__) >= key_len else gen_key.ljust(snake_case__ , """0""")
return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
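# The key-sifting step above, sketched without qiskit: only positions
# where Alice's and Bob's bases agree contribute key bits (values are
# illustrative).
alice_basis = [0, 1, 1, 0, 1]
bob_basis = [0, 0, 1, 0, 1]
measured = "10110"
sifted = "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured) if a == b)
print(sifted)  # '1110' -- bases agree at positions 0, 2, 3 and 4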
| 83 | 1 |
from typing import Any
import numpy as np
def snake_case ( snake_case__ :np.ndarray) -> bool:
return np.array_equal(snake_case__ , matrix.conjugate().T)
def snake_case ( snake_case__ :np.ndarray , snake_case__ :np.ndarray) -> Any:
_A = v.conjugate().T
_A = v_star.dot(snake_case__)
assert isinstance(snake_case__ , np.ndarray)
return (v_star_dot.dot(snake_case__)) / (v_star.dot(snake_case__))
def snake_case ( ) -> None:
_A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
_A = np.array([[1], [2], [3]])
assert is_hermitian(snake_case__), F'''{a} is not hermitian.'''
print(rayleigh_quotient(snake_case__ , snake_case__))
_A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
assert is_hermitian(snake_case__), F'''{a} is not hermitian.'''
assert rayleigh_quotient(snake_case__ , snake_case__) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
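# --- Editor's note: a numerical check (not from the source) that the Rayleigh
# quotient of a Hermitian matrix always lies between its smallest and largest
# eigenvalues, which is why it is useful for eigenvalue estimation.
import numpy as np

_a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
_eigs = np.linalg.eigvalsh(_a)  # real-valued and ascending, since _a is Hermitian
_rng = np.random.default_rng(seed=0)
for _ in range(5):
    _v = _rng.normal(size=(3, 1)) + 1j * _rng.normal(size=(3, 1))
    _r = (_v.conjugate().T @ _a @ _v) / (_v.conjugate().T @ _v)
    assert _eigs[0] - 1e-9 <= _r.real.item() <= _eigs[-1] + 1e-9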
| 83 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :str = '''xlm-prophetnet'''
lowerCamelCase :Tuple = ['''past_key_values''']
lowerCamelCase :Any = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 3_05_22 , lowerCAmelCase_ = 10_24 , lowerCAmelCase_ = 40_96 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 16 , lowerCAmelCase_ = 40_96 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 16 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 5_12 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 1_28 , lowerCAmelCase_ = False , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = True , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 2 , **lowerCAmelCase_ , ) -> Optional[Any]:
_A = vocab_size
_A = hidden_size
_A = encoder_ffn_dim
_A = num_encoder_layers
_A = num_encoder_attention_heads
_A = decoder_ffn_dim
_A = num_decoder_layers
_A = num_decoder_attention_heads
_A = max_position_embeddings
_A = init_std # Normal(0, this parameter)
_A = activation_function
# parameters for xlmprophetnet
_A = ngram
_A = num_buckets
_A = relative_max_distance
_A = disable_ngram_loss
_A = eps
# 3 Types of Dropout
_A = attention_dropout
_A = activation_dropout
_A = dropout
_A = use_cache
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , add_cross_attention=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
@property
def UpperCAmelCase ( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 83 | import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(snake_case__): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__):
for image_y in range(snake_case__):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
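# --- Editor's note: a self-contained trace (not from the source) of the
# escape-time loop above, assuming the (x, y, max_step) parameter order.
def _escape_fraction(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y  # one z -> z^2 + c iteration
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

assert _escape_fraction(0.0, 0.0, 50) == 1.0  # the origin never diverges
assert _escape_fraction(2.0, 2.0, 50) == 0.0  # diverges on the very first step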
| 83 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=True , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> int:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_multiple_size
_A = hidden_act
_A = hidden_dropout
_A = attention_dropout
_A = weight_tying
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def UpperCAmelCase ( self ) -> List[Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self ) -> List[Any]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self ) -> str:
_A , _A , _A , _A = self.prepare_config_and_inputs()
_A = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = GPTNeoXJapaneseModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = True
_A = GPTNeoXJapaneseModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = True
_A = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A = torch.cat([input_ids, next_tokens] , dim=-1 )
_A = torch.cat([input_mask, next_mask] , dim=-1 )
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
_A = output_from_no_past["""hidden_states"""][0]
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["""hidden_states"""][0]
# select random slice
_A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A = output_from_no_past[:, -3:, random_slice_idx].detach()
_A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> Dict:
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase :str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase :Dict = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase :Any = False
lowerCamelCase :List[Any] = False
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> List[Any]:
_A = GPTNeoXJapaneseModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[Any]:
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs_for_decoder()
_A = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> int:
_A = """abeja/gpt-neox-japanese-2.7b"""
_A = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
_A = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
_A = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
_A = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase_ )
_A = []
for prompt in prompts:
_A = tokenizer(lowerCAmelCase_ , return_tensors="""pt""" ).input_ids
_A = model.generate(lowerCAmelCase_ , max_length=50 )
_A = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
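# --- Editor's note: a model-free sketch (illustrative tensors only) of the
# cache-consistency check above: a full forward pass and an incremental pass
# with `past_key_values` must agree on the newly generated positions.
import torch

_full = torch.randn(2, 8, 16)            # hidden states from one full-sequence pass
_incremental = _full[:, -3:, :].clone()  # stand-in for the cached pass on the last 3 tokens
_idx = 5                                 # compare one random hidden-dim slice, as the test does
assert torch.allclose(_full[:, -3:, _idx], _incremental[:, :, _idx], atol=1e-3)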
| 83 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if parse_col != "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
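# --- Editor's note: the averaged CoNLL score computed above is simply the mean
# of the MUC, B-cubed and CEAFe F1 values, scaled to a percentage. A worked case:
_muc_f1, _bcub_f1, _ceafe_f1 = 0.70, 0.65, 0.60  # illustrative F1 values
print(f"CoNLL score: {(_muc_f1 + _bcub_f1 + _ceafe_f1) / 3 * 100:.2f}")  # -> 65.00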
| 83 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = '''table-transformer'''
lowerCamelCase :int = ['''past_key_values''']
lowerCamelCase :Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=1_00 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_="resnet50" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_A = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = backbone_config.get("""model_type""" )
_A = CONFIG_MAPPING[backbone_model_type]
_A = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
_A , _A , _A = None, None, None
_A = use_timm_backbone
_A = backbone_config
_A = num_channels
_A = num_queries
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = init_xavier_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = encoder_layers
_A = auxiliary_loss
_A = position_embedding_type
_A = backbone
_A = use_pretrained_backbone
_A = dilation
# Hungarian matcher
_A = class_cost
_A = bbox_cost
_A = giou_cost
# Loss coefficients
_A = mask_loss_coefficient
_A = dice_loss_coefficient
_A = bbox_loss_coefficient
_A = giou_loss_coefficient
_A = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self ) -> int:
return self.d_model
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :str = version.parse('''1.11''' )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCAmelCase ( self ) -> float:
return 1E-5
@property
def UpperCAmelCase ( self ) -> int:
return 12
| 83 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : lowerCAmelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
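# --- Editor's note: a toy, self-contained trace (not from the source) of the
# core BPE loop above: repeatedly merge the best-ranked adjacent pair until no
# ranked pair remains in the word.
def _bpe_merge(word: tuple, ranks: dict) -> tuple:
    while True:
        candidates = [p for p in zip(word, word[1:]) if p in ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=ranks.get)  # lowest rank = earliest merge
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)

print(_bpe_merge(tuple("lower"), {("l", "o"): 0, ("lo", "w"): 1}))  # -> ('low', 'e', 'r')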
| 83 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = '''blip_text_model'''
def __init__( self , lowerCAmelCase_=3_05_24 , lowerCAmelCase_=7_68 , lowerCAmelCase_=7_68 , lowerCAmelCase_=30_72 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=8 , lowerCAmelCase_=5_12 , lowerCAmelCase_="gelu" , lowerCAmelCase_=1E-12 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3_05_22 , lowerCAmelCase_=2 , lowerCAmelCase_=0 , lowerCAmelCase_=1_02 , lowerCAmelCase_=True , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> Union[str, Any]:
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , sep_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
_A , _A = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
_A = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = '''blip_vision_model'''
def __init__( self , lowerCAmelCase_=7_68 , lowerCAmelCase_=30_72 , lowerCAmelCase_=5_12 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3_84 , lowerCAmelCase_=16 , lowerCAmelCase_="gelu" , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1E-10 , **lowerCAmelCase_ , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase_ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
_A , _A = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
_A = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = '''blip'''
lowerCamelCase :Optional[int] = True
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5_12 , lowerCAmelCase_=2.6592 , lowerCAmelCase_=2_56 , **lowerCAmelCase_ , ) -> int:
super().__init__(**lowerCAmelCase_ )
if text_config is None:
_A = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
_A = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
_A = BlipTextConfig(**lowerCAmelCase_ )
_A = BlipVisionConfig(**lowerCAmelCase_ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.02
_A = image_text_hidden_size
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output
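# --- Editor's note: a hedged usage sketch of the composition pattern above. In
# the released library the classmethod is named `from_text_vision_configs`; the
# lines below assume that API and are illustrative only.
#     text_config = BlipTextConfig(vocab_size=30524)
#     vision_config = BlipVisionConfig(image_size=384)
#     config = BlipConfig.from_text_vision_configs(text_config, vision_config)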
| 83 | _SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
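# --- Editor's note: a worked round trip through the two helpers above:
#     >>> encrypt("SOS")
#     '... --- ...'
#     >>> decrypt("... --- ...")
#     'SOS'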
| 83 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Dict:
_A = path_or_paths
_A = split if split or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else """train"""
_A = features
_A = cache_dir
_A = keep_in_memory
_A = streaming
_A = num_proc
_A = kwargs
@abstractmethod
def UpperCAmelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Dict:
_A = features
_A = cache_dir
_A = keep_in_memory
_A = streaming
_A = num_proc
_A = kwargs
@abstractmethod
def UpperCAmelCase ( self ) -> Union[Dataset, IterableDataset]:
pass
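# --- Editor's note: a self-contained sketch (hypothetical names) of how a
# concrete reader fills in the first ABC above: stash builder arguments in
# __init__, then materialize the dataset in the abstract read method.
from abc import ABC, abstractmethod

class _Reader(ABC):  # stand-in for the path-based reader ABC
    def __init__(self, path_or_paths, streaming: bool = False):
        self.path_or_paths = path_or_paths
        self.streaming = streaming

    @abstractmethod
    def read(self): ...

class _JsonReader(_Reader):  # hypothetical concrete subclass
    def read(self):
        return f"would load {self.path_or_paths} (streaming={self.streaming})"

print(_JsonReader("data.json").read())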
| 83 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
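# --- Editor's note: a hedged usage sketch. PipelineTool subclasses are invoked
# as callables, chaining encode -> forward -> decode as defined above. The
# concrete class name below is an assumption, not taken from this file.
#     tool = TextSummarizationTool()
#     summary = tool("Some long English text to condense ...")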
| 83 | 1 |
from math import factorial
def snake_case ( snake_case__ :int , snake_case__ :int) -> int:
# If either condition holds, the function is being asked to take the
# factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""")
return factorial(snake_case__) // (factorial(snake_case__) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
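# --- Editor's note: the closed form used above is C(n, k) = n! / (k! * (n-k)!);
# e.g. C(5, 2) = 120 / (2 * 6) = 10, which the helper reproduces:
#     >>> combinations(5, 2)
#     10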
| 83 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
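# --- Editor's note: a toy trace (illustrative) of the prefix-renaming pass in
# `get_new_dict` above — every (old, new) pair is applied to each state-dict key.
_key = "bert.bert.encoder.layer.0.attention.self.query.weight"
for _old, _new in [("bert.bert", "visual_bert"), ("bert.cls", "cls")]:
    _key = _key.replace(_old, _new)
print(_key)  # -> visual_bert.encoder.layer.0.attention.self.query.weight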
| 83 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')
_SCREAMING_SNAKE_CASE = {'target_lang': 'fi', 'source_lang': 'en'}
_SCREAMING_SNAKE_CASE = '>>zh<<'
_SCREAMING_SNAKE_CASE = 'Helsinki-NLP/'
if is_torch_available():
_SCREAMING_SNAKE_CASE = 'pt'
elif is_tf_available():
_SCREAMING_SNAKE_CASE = 'tf'
else:
_SCREAMING_SNAKE_CASE = 'jax'
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)

        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_A = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_A,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
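
# Sketch of the behaviour the tests above exercise, outside the unittest harness
# (assumes network access; "Helsinki-NLP/opus-mt-en-de" is the Hub checkpoint already
# used above, and the German target sentence is purely illustrative):
#
#   tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tokenizer(["I am a small frog"], return_tensors="pt")
#   labels = tokenizer(text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")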
| 83 |
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
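
# Standalone sketch of the Dataset.from_list behaviour pinned down above:
#
#   from datasets import Dataset
#   dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#   assert dset.column_names == ["col_1", "col_2"]
#   assert dset[0] == {"col_1": 3, "col_2": "a"}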
| 83 | 1 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (exponential time): every cell recurses on right, diagonal and down."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # base case: fell off the matrix
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # side of the largest all-ones square with top-left corner at (row, col)
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion with memoization: O(rows * cols) time and space."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up table: O(rows * cols) time and space."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up with two rolling rows: O(rows * cols) time, O(cols) space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot the row just computed; plain aliasing (next_row = current_row)
        # would let this row's writes overwrite the values the next pass must read
        next_row = current_row[:]
    return largest_square_area
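
# Despite the "area" in the names, each function above returns the *side length* of
# the largest all-ones square. A quick agreement check across the four strategies
# (using the function names as defined above):
#
#   mat = [[1, 1], [1, 1]]
#   assert (
#       largest_square_area_in_matrix_top_down(2, 2, mat)
#       == largest_square_area_in_matrix_top_down_with_memoization(2, 2, mat)
#       == largest_square_area_in_matrix_bottom_up(2, 2, mat)
#       == largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat)
#       == 2
#   )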
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 83 |
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(d) for 2 <= d <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit."""
    # Sieve of Eratosthenes over the odd candidates (2 is added separately)
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
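
# Worked example: for limit = 8 the sieve keeps {2, 3, 5, 7}, and
# phi(2), ..., phi(8) = 1, 2, 2, 4, 2, 6, 4, which sum to 21 -- the count of
# reduced proper fractions with denominator <= 8. So, floating-point rounding
# permitting, solution(8) == 21.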
if __name__ == "__main__":
print(F'''{solution() = }''')
| 83 | 1 |